/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each class_device registered. When the
 * class_device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, class_device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */
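
/*
 * Illustrative sketch (not part of this file): a minimal client that
 * accepts any memcpy-capable channel.  The callback signature and the
 * DMA_ACK/DMA_DUP acknowledgements follow the dma_event_callback and
 * dma_state definitions in dmaengine.h.  Answering DMA_ACK to
 * DMA_RESOURCE_AVAILABLE takes a reference on the channel; answering
 * DMA_ACK to DMA_RESOURCE_REMOVED releases it.
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		if (state == DMA_RESOURCE_AVAILABLE ||
 *		    state == DMA_RESOURCE_REMOVED)
 *			return DMA_ACK;
 *		return DMA_DUP;
 *	}
 *
 *	static struct dma_client my_client = { .event_callback = my_event };
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */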

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	int in_use = 0;

	if (unlikely(chan->slow_ref) &&
		atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct class_device_attribute dma_class_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_class_dev_release(struct class_device *cd)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.class_dev_attrs = dma_class_attrs,
	.release	= dma_class_dev_release,
};
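
/*
 * The attributes above are exported per channel under /sys/class/dma/,
 * e.g. /sys/class/dma/dma0chan0/memcpy_count; the "dma%dchan%d" names
 * are assigned in dma_async_device_register() below.
 */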

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int desc;	/* allocated descriptor count */
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;

			desc = chan->device->device_alloc_chan_resources(chan);
			if (desc >= 0) {
				ack = client->event_callback(client,
						chan,
						DMA_RESOURCE_AVAILABLE);

				/* we are done once this client rejects
				 * an available resource
				 */
				if (ack == DMA_ACK) {
					dma_chan_get(chan);
					kref_get(&device->refcount);
				} else if (ack == DMA_NAK)
					return;
			}
		}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
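
/*
 * Illustrative sketch (not part of this file): waiting synchronously on
 * a submitted transaction and falling back to a CPU copy on failure;
 * "chan", "dest", "src" and "len" are assumed to come from the caller.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0 || dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		memcpy(dest, src, len);
 */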

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;
	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* client was holding resources for this channel so
		 * free it
		 */
		if (ack == DMA_ACK) {
			dma_chan_put(chan);
			kref_put(&chan->device->refcount,
				dma_async_device_cleanup);
		}
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);

/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	/* free all channels the client is holding */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

			if (ack == DMA_ACK) {
				dma_chan_put(chan);
				kref_put(&chan->device->refcount,
					dma_async_device_cleanup);
			}
		}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send to the client all available channels
 * that satisfy the capability mask
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - register a DMA device with the subsystem
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_dependency_added);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);
	device->dev_id = id++;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->class_dev.class = &dma_devclass;
		chan->class_dev.dev = NULL;
		snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
			 device->dev_id, chan->chan_id);

		rc = class_device_register(&chan->class_dev);
		if (rc) {
			chancnt--;
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}

	mutex_lock(&dma_list_mutex);
	list_add_tail(&device->global_node, &dma_device_list);
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		class_device_unregister(&chan->class_dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
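
/*
 * Illustrative sketch (not part of this file): the shape of a driver's
 * registration call.  The "my_*" routines and "pdev" are hypothetical;
 * the fields shown are the ones validated by the BUG_ONs above.
 *
 *	dma_cap_set(DMA_MEMCPY, device->cap_mask);
 *	device->device_alloc_chan_resources = my_alloc_chan_resources;
 *	device->device_free_chan_resources = my_free_chan_resources;
 *	device->device_prep_dma_memcpy = my_prep_memcpy;
 *	device->device_dependency_added = my_dependency_added;
 *	device->device_is_tx_complete = my_is_tx_complete;
 *	device->device_issue_pending = my_issue_pending;
 *	device->dev = &pdev->dev;
 *
 *	INIT_LIST_HEAD(&device->channels);
 *	(add one struct dma_chan per hardware channel to the list, then:)
 *
 *	err = dma_async_device_register(device);
 */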

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del(&device->global_node);
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		dma_clients_notify_removed(chan);
		class_device_unregister(&chan->class_dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
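
/*
 * Illustrative sketch (not part of this file): the asynchronous pattern,
 * polling for completion instead of blocking in dma_sync_wait().
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *			DMA_IN_PROGRESS)
 *		cpu_relax();
 */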

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
	INIT_LIST_HEAD(&tx->depend_node);
	INIT_LIST_HEAD(&tx->depend_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
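
/*
 * Illustrative sketch (not part of this file): a driver typically calls
 * dma_async_tx_descriptor_init() from its device_alloc_chan_resources
 * routine for each descriptor it creates.  "struct my_desc" and the
 * "my_*" callbacks are hypothetical driver-private names.
 *
 *	struct my_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
 *
 *	if (desc) {
 *		dma_async_tx_descriptor_init(&desc->txd, chan);
 *		desc->txd.tx_set_src = my_set_src;
 *		desc->txd.tx_set_dest = my_set_dest;
 *		desc->txd.tx_submit = my_tx_submit;
 *	}
 */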

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);