/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each class_device registered. When the
 * class_device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function completes a completion, so
 * unregister_device issues a removal event, calls class_device_unregister,
 * does a kref_put for the first reference, and then waits on the completion
 * for all other references to be dropped.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct class_device *cd, char *buf)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	int in_use = 0;

	if (unlikely(chan->slow_ref) &&
		atomic_read(&chan->refcount.refcount) > 1)
		in_use = 1;
	else {
		if (local_read(&(per_cpu_ptr(chan->local,
			get_cpu())->refcount)) > 0)
			in_use = 1;
		put_cpu();
	}

	return sprintf(buf, "%d\n", in_use);
}

static struct class_device_attribute dma_class_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_class_dev_release(struct class_device *cd)
{
	struct dma_chan *chan = container_of(cd, struct dma_chan, class_dev);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.class_dev_attrs = dma_class_attrs,
	.release	= dma_class_dev_release,
};

/* --- client and device registration --- */

#define dma_chan_satisfies_mask(chan, mask) \
	__dma_chan_satisfies_mask((chan), &(mask))
static int
__dma_chan_satisfies_mask(struct dma_chan *chan, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, chan->device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int desc;	/* allocated descriptor count */
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node)
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!dma_chan_satisfies_mask(chan, client->cap_mask))
				continue;

			desc = chan->device->device_alloc_chan_resources(chan);
			if (desc >= 0) {
				ack = client->event_callback(client,
						chan,
						DMA_RESOURCE_AVAILABLE);

				/* we are done once this client rejects
				 * an available resource
				 */
				if (ack == DMA_ACK)
					dma_chan_get(chan);
				else if (ack == DMA_NAK)
					return;
			}
		}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);
	chan->device->device_free_chan_resources(chan);
	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);
	int bias = 0x7FFFFFFF;
	int i;

	/*
	 * Fold the outstanding per-cpu references into the kref: subtract
	 * the bias added by dma_chan_release(), minus whatever the fast
	 * path still holds, then drop the channel's initial reference.
	 */
	for_each_possible_cpu(i)
		bias -= local_read(&per_cpu_ptr(chan->local, i)->refcount);
	atomic_sub(bias, &chan->refcount.refcount);
	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	/*
	 * Bias the kref so it cannot reach zero early, switch readers to
	 * the slow (kref) path, and defer folding in the per-cpu counts
	 * until current fast-path users are done.
	 */
	atomic_add(0x7FFFFFFF, &chan->refcount.refcount);
	chan->slow_ref = 1;
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_clients_notify_removed - tell the clients that a channel is going away
 * @chan: channel on its way out
 */
static void dma_clients_notify_removed(struct dma_chan *chan)
{
	struct dma_client *client;
	enum dma_state_client ack;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node) {
		ack = client->event_callback(client, chan,
				DMA_RESOURCE_REMOVED);

		/* the client was holding a reference on this channel,
		 * so drop it
		 */
		if (ack == DMA_ACK)
			dma_chan_put(chan);
	}

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);
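
/*
 * Illustrative sketch only, not part of the subsystem: a minimal client
 * that asks for memcpy-capable channels and keeps the first one offered,
 * following the event_callback protocol described at the top of this file.
 * The identifiers example_chan, example_event, example_client and
 * example_client_init are hypothetical, and DMA_DUP ("seen it, not
 * interested") is assumed to be defined next to DMA_ACK/DMA_NAK in
 * dmaengine.h.
 */
#if 0	/* example only */
static struct dma_chan *example_chan;

static enum dma_state_client example_event(struct dma_client *client,
		struct dma_chan *chan, enum dma_state state)
{
	switch (state) {
	case DMA_RESOURCE_AVAILABLE:
		if (example_chan)
			return DMA_DUP;		/* one channel is enough */
		example_chan = chan;
		return DMA_ACK;			/* hold a channel reference */
	case DMA_RESOURCE_REMOVED:
		if (chan != example_chan)
			return DMA_DUP;
		example_chan = NULL;
		return DMA_ACK;			/* drop our reference */
	default:
		return DMA_DUP;
	}
}

static struct dma_client example_client = {
	.event_callback = example_event,
};

static void example_client_init(void)
{
	dma_cap_zero(example_client.cap_mask);
	dma_cap_set(DMA_MEMCPY, example_client.cap_mask);
	dma_async_client_register(&example_client);
	dma_async_client_chan_request(&example_client);
}
#endif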
Chris Leechc13c8262006-05-23 17:18:44 -0700292
293/**
294 * dma_async_client_unregister - unregister a client and free the &dma_client
Randy Dunlap65088712006-07-03 19:45:31 -0700295 * @client: &dma_client to free
Chris Leechc13c8262006-05-23 17:18:44 -0700296 *
297 * Force frees any allocated DMA channels, frees the &dma_client memory
298 */
299void dma_async_client_unregister(struct dma_client *client)
300{
Dan Williamsd379b012007-07-09 11:56:42 -0700301 struct dma_device *device;
Chris Leechc13c8262006-05-23 17:18:44 -0700302 struct dma_chan *chan;
Dan Williamsd379b012007-07-09 11:56:42 -0700303 enum dma_state_client ack;
Chris Leechc13c8262006-05-23 17:18:44 -0700304
305 if (!client)
306 return;
307
Chris Leechc13c8262006-05-23 17:18:44 -0700308 mutex_lock(&dma_list_mutex);
Dan Williamsd379b012007-07-09 11:56:42 -0700309 /* free all channels the client is holding */
310 list_for_each_entry(device, &dma_device_list, global_node)
311 list_for_each_entry(chan, &device->channels, device_node) {
312 ack = client->event_callback(client, chan,
313 DMA_RESOURCE_REMOVED);
314
Haavard Skinnemoen348badf2007-11-14 16:59:27 -0800315 if (ack == DMA_ACK)
Dan Williamsd379b012007-07-09 11:56:42 -0700316 dma_chan_put(chan);
Dan Williamsd379b012007-07-09 11:56:42 -0700317 }
318
Chris Leechc13c8262006-05-23 17:18:44 -0700319 list_del(&client->global_node);
320 mutex_unlock(&dma_list_mutex);
Chris Leechc13c8262006-05-23 17:18:44 -0700321}
David Brownell765e3d82007-03-16 13:38:05 -0800322EXPORT_SYMBOL(dma_async_client_unregister);
Chris Leechc13c8262006-05-23 17:18:44 -0700323
324/**
Dan Williamsd379b012007-07-09 11:56:42 -0700325 * dma_async_client_chan_request - send all available channels to the
326 * client that satisfy the capability mask
327 * @client - requester
Chris Leechc13c8262006-05-23 17:18:44 -0700328 */
Dan Williamsd379b012007-07-09 11:56:42 -0700329void dma_async_client_chan_request(struct dma_client *client)
Chris Leechc13c8262006-05-23 17:18:44 -0700330{
Dan Williamsd379b012007-07-09 11:56:42 -0700331 mutex_lock(&dma_list_mutex);
332 dma_client_chan_alloc(client);
333 mutex_unlock(&dma_list_mutex);
Chris Leechc13c8262006-05-23 17:18:44 -0700334}
David Brownell765e3d82007-03-16 13:38:05 -0800335EXPORT_SYMBOL(dma_async_client_chan_request);
Chris Leechc13c8262006-05-23 17:18:44 -0700336
337/**
Randy Dunlap65088712006-07-03 19:45:31 -0700338 * dma_async_device_register - registers DMA devices found
Chris Leechc13c8262006-05-23 17:18:44 -0700339 * @device: &dma_device
340 */
341int dma_async_device_register(struct dma_device *device)
342{
343 static int id;
Jeff Garzikff487fb2007-03-08 09:57:34 -0800344 int chancnt = 0, rc;
Chris Leechc13c8262006-05-23 17:18:44 -0700345 struct dma_chan* chan;
346
347 if (!device)
348 return -ENODEV;
349
Dan Williams7405f742007-01-02 11:10:43 -0700350 /* validate device routines */
351 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
352 !device->device_prep_dma_memcpy);
353 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
354 !device->device_prep_dma_xor);
355 BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
356 !device->device_prep_dma_zero_sum);
357 BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
358 !device->device_prep_dma_memset);
359 BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
360 !device->device_prep_dma_interrupt);
361
362 BUG_ON(!device->device_alloc_chan_resources);
363 BUG_ON(!device->device_free_chan_resources);
364 BUG_ON(!device->device_dependency_added);
365 BUG_ON(!device->device_is_tx_complete);
366 BUG_ON(!device->device_issue_pending);
367 BUG_ON(!device->dev);
368
Chris Leechc13c8262006-05-23 17:18:44 -0700369 init_completion(&device->done);
370 kref_init(&device->refcount);
371 device->dev_id = id++;
372
373 /* represent channels in sysfs. Probably want devs too */
374 list_for_each_entry(chan, &device->channels, device_node) {
375 chan->local = alloc_percpu(typeof(*chan->local));
376 if (chan->local == NULL)
377 continue;
378
379 chan->chan_id = chancnt++;
380 chan->class_dev.class = &dma_devclass;
381 chan->class_dev.dev = NULL;
382 snprintf(chan->class_dev.class_id, BUS_ID_SIZE, "dma%dchan%d",
383 device->dev_id, chan->chan_id);
384
Jeff Garzikff487fb2007-03-08 09:57:34 -0800385 rc = class_device_register(&chan->class_dev);
386 if (rc) {
387 chancnt--;
388 free_percpu(chan->local);
389 chan->local = NULL;
390 goto err_out;
391 }
392
Haavard Skinnemoen348badf2007-11-14 16:59:27 -0800393 /* One for the channel, one of the class device */
394 kref_get(&device->refcount);
Chris Leechc13c8262006-05-23 17:18:44 -0700395 kref_get(&device->refcount);
Dan Williamsd379b012007-07-09 11:56:42 -0700396 kref_init(&chan->refcount);
397 chan->slow_ref = 0;
398 INIT_RCU_HEAD(&chan->rcu);
Chris Leechc13c8262006-05-23 17:18:44 -0700399 }
400
401 mutex_lock(&dma_list_mutex);
402 list_add_tail(&device->global_node, &dma_device_list);
403 mutex_unlock(&dma_list_mutex);
404
Dan Williamsd379b012007-07-09 11:56:42 -0700405 dma_clients_notify_available();
Chris Leechc13c8262006-05-23 17:18:44 -0700406
407 return 0;
Jeff Garzikff487fb2007-03-08 09:57:34 -0800408
409err_out:
410 list_for_each_entry(chan, &device->channels, device_node) {
411 if (chan->local == NULL)
412 continue;
413 kref_put(&device->refcount, dma_async_device_cleanup);
414 class_device_unregister(&chan->class_dev);
415 chancnt--;
416 free_percpu(chan->local);
417 }
418 return rc;
Chris Leechc13c8262006-05-23 17:18:44 -0700419}
David Brownell765e3d82007-03-16 13:38:05 -0800420EXPORT_SYMBOL(dma_async_device_register);
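
/*
 * Illustrative sketch only, not part of the subsystem: the rough shape of a
 * hypothetical provider registering one memcpy-capable channel.  Every
 * example_* identifier (the hardware callbacks in particular) is a
 * placeholder a real driver would implement; error unwinding and the
 * slab.h include for kzalloc() are omitted for brevity.
 */
#if 0	/* example only */
static int example_probe(struct device *dev)
{
	struct dma_device *dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	struct dma_chan *chan = kzalloc(sizeof(*chan), GFP_KERNEL);

	if (!dma || !chan)
		return -ENOMEM;

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = dev;

	/* mandatory hooks, checked by the BUG_ON()s above */
	dma->device_alloc_chan_resources = example_alloc_chan_resources;
	dma->device_free_chan_resources = example_free_chan_resources;
	dma->device_prep_dma_memcpy = example_prep_memcpy;
	dma->device_dependency_added = example_dependency_added;
	dma->device_is_tx_complete = example_is_tx_complete;
	dma->device_issue_pending = example_issue_pending;

	/* hang the channel off the device before registering */
	chan->device = dma;
	INIT_LIST_HEAD(&dma->channels);
	list_add_tail(&chan->device_node, &dma->channels);

	return dma_async_device_register(dma);
}
#endif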
Chris Leechc13c8262006-05-23 17:18:44 -0700421
422/**
Randy Dunlap65088712006-07-03 19:45:31 -0700423 * dma_async_device_cleanup - function called when all references are released
424 * @kref: kernel reference object
Chris Leechc13c8262006-05-23 17:18:44 -0700425 */
426static void dma_async_device_cleanup(struct kref *kref)
427{
428 struct dma_device *device;
429
430 device = container_of(kref, struct dma_device, refcount);
431 complete(&device->done);
432}
433
Randy Dunlap65088712006-07-03 19:45:31 -0700434/**
435 * dma_async_device_unregister - unregisters DMA devices
436 * @device: &dma_device
437 */
438void dma_async_device_unregister(struct dma_device *device)
Chris Leechc13c8262006-05-23 17:18:44 -0700439{
440 struct dma_chan *chan;
Chris Leechc13c8262006-05-23 17:18:44 -0700441
442 mutex_lock(&dma_list_mutex);
443 list_del(&device->global_node);
444 mutex_unlock(&dma_list_mutex);
445
446 list_for_each_entry(chan, &device->channels, device_node) {
Dan Williamsd379b012007-07-09 11:56:42 -0700447 dma_clients_notify_removed(chan);
Chris Leechc13c8262006-05-23 17:18:44 -0700448 class_device_unregister(&chan->class_dev);
Dan Williamsd379b012007-07-09 11:56:42 -0700449 dma_chan_release(chan);
Chris Leechc13c8262006-05-23 17:18:44 -0700450 }
Chris Leechc13c8262006-05-23 17:18:44 -0700451
452 kref_put(&device->refcount, dma_async_device_cleanup);
453 wait_for_completion(&device->done);
454}
David Brownell765e3d82007-03-16 13:38:05 -0800455EXPORT_SYMBOL(dma_async_device_unregister);
Chris Leechc13c8262006-05-23 17:18:44 -0700456
/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
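
/*
 * Illustrative sketch only, not part of the subsystem: a hypothetical
 * helper (example_copy_and_wait) showing how the cookie returned above can
 * be paired with dma_sync_wait() for a blocking copy, with a plain memcpy()
 * fallback when no channel is available.
 */
#if 0	/* example only */
static int example_copy_and_wait(struct dma_chan *chan, void *dest,
		void *src, size_t len)
{
	dma_cookie_t cookie;

	if (!chan) {
		memcpy(dest, src, len);
		return 0;
	}

	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
	if (cookie < 0)
		return cookie;		/* no descriptor was available */

	/* spin until the hardware reports completion or an error */
	return dma_sync_wait(chan, cookie) == DMA_ERROR ? -EIO : 0;
}
#endif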

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t addr;
	dma_cookie_t cookie;
	int cpu;

	tx = dev->device_prep_dma_memcpy(chan, len, 0);
	if (!tx)
		return -ENOMEM;

	tx->ack = 1;
	tx->callback = NULL;
	addr = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	tx->tx_set_src(addr, tx, 0);
	addr = dma_map_page(dev->dev, dest_pg, dest_off, len, DMA_FROM_DEVICE);
	tx->tx_set_dest(addr, tx, 0);
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
	INIT_LIST_HEAD(&tx->depend_node);
	INIT_LIST_HEAD(&tx->depend_list);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);
