/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>

/**
 * enum dma_state - resource PNP/power management state
 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
 * @DMA_RESOURCE_RESUME: DMA device returning to full power
 * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
 */
enum dma_state {
	DMA_RESOURCE_SUSPEND,
	DMA_RESOURCE_RESUME,
	DMA_RESOURCE_AVAILABLE,
	DMA_RESOURCE_REMOVED,
};

/**
 * enum dma_state_client - state of the channel in the client
 * @DMA_ACK: client would like to use, or was using this channel
 * @DMA_DUP: client has already seen this channel, or is not using this channel
 * @DMA_NAK: client does not want to see any more channels
 */
enum dma_state_client {
	DMA_ACK,
	DMA_DUP,
	DMA_NAK,
};

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is >0 it's a DMA request cookie, <0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)

/**
 * enum dma_prep_flags - DMA flags to augment operation preparation
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *	this transaction
 */
enum dma_prep_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @refcount: local_t used for open-coded "bigref" counting
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	local_t refcount;
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @refcount: kref, used in "bigref" slow-mode
 * @slow_ref: indicates that the DMA channel is free
 * @rcu: the DMA channel's RCU head
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct device dev;

	struct kref refcount;
	int slow_ref;
	struct rcu_head rcu;

	struct list_head device_node;
	struct dma_chan_percpu *local;
};

#define to_dma_chan(p) container_of(p, struct dma_chan, dev)

void dma_chan_cleanup(struct kref *kref);

/* take a channel reference: per-cpu "bigref" fast path, kref slow path */
static inline void dma_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);
	else {
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

/* drop a channel reference; in slow-mode the final kref_put triggers
 * dma_chan_cleanup()
 */
static inline void dma_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

/**
 * typedef dma_event_callback - function pointer to a DMA event callback
 * For each channel added to the system this routine is called for each client.
 * If the client would like to use the channel it returns DMA_ACK to signal
 * the dmaengine core to take out a reference on the channel and its
 * corresponding device. A client must not 'ack' an available channel more
 * than once. When a channel is removed all clients are notified. If a client
 * is using the channel it must 'ack' the removal. A client must not 'ack' a
 * removed channel more than once.
 * @client: 'this' pointer for the client context
 * @chan: channel to be acted upon
 * @state: available or removed
 */
struct dma_client;
typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
		struct dma_chan *chan, enum dma_state state);

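/*
 * Example (illustrative sketch, not part of the API): a minimal event
 * callback that acks the first available channel it sees and acks the
 * removal of the channel it holds.  "my_chan" is an assumed per-driver
 * static; all other channel events are answered with DMA_DUP.
 *
 *	static struct dma_chan *my_chan;
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		switch (state) {
 *		case DMA_RESOURCE_AVAILABLE:
 *			if (my_chan)
 *				return DMA_DUP;
 *			my_chan = chan;
 *			return DMA_ACK;
 *		case DMA_RESOURCE_REMOVED:
 *			if (chan != my_chan)
 *				return DMA_DUP;
 *			my_chan = NULL;
 *			return DMA_ACK;
 *		default:
 *			return DMA_DUP;
 *		}
 *	}
 */
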
/**
 * struct dma_client - info on the entity making use of DMA services
 * @event_callback: func ptr to call when something happens
 * @cap_mask: only return channels that satisfy the requested capabilities
 *	a value of zero corresponds to any capability
 * @global_node: list_head for global dma_client_list
 */
struct dma_client {
	dma_event_callback	event_callback;
	dma_cap_mask_t		cap_mask;
	struct list_head	global_node;
};

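/*
 * Example (illustrative sketch): registering a client that only wants
 * memcpy-capable channels.  dma_async_client_register() and
 * dma_async_client_chan_request() are declared further down in this
 * header; "my_event" is the hypothetical callback sketched above.
 *
 *	static struct dma_client my_client = {
 *		.event_callback = my_event,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */
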
typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @ack: the descriptor cannot be reused until the client acknowledges
 *	receipt, i.e. has had a chance to establish any dependency chains
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @depend_list: at completion this list of transactions is submitted
 * @depend_node: allow this transaction to be executed after another
 *	transaction has completed, possibly on another channel
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the dependency list
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	int ack;
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct list_head depend_list;
	struct list_head depend_node;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @refcount: reference count
 * @done: IO completion struct
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_dependency_added: async_tx notifies the channel about new deps
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	struct kref refcount;
	struct completion done;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan);

	void (*device_dependency_added)(struct dma_chan *chan);
	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};

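/*
 * Example (driver-side sketch, all "mydev_*" names hypothetical): a DMA
 * driver fills in the capability mask and the operation hooks it
 * supports, then registers with dma_async_device_register(), declared
 * below.  Channel setup is elided.
 *
 *	static struct dma_device mydev_dma = {
 *		.device_alloc_chan_resources = mydev_alloc_chan_resources,
 *		.device_free_chan_resources = mydev_free_chan_resources,
 *		.device_prep_dma_memcpy = mydev_prep_memcpy,
 *		.device_is_tx_complete = mydev_is_tx_complete,
 *		.device_issue_pending = mydev_issue_pending,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, mydev_dma.cap_mask);
 *	INIT_LIST_HEAD(&mydev_dma.channels);
 *	err = dma_async_device_register(&mydev_dma);
 */
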
/* --- public DMA engine API --- */

void dma_async_client_register(struct dma_client *client);
void dma_async_client_unregister(struct dma_client *client);
void dma_async_client_chan_request(struct dma_client *client);
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

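/*
 * Example (illustrative sketch): offloading a buffer copy on a channel
 * previously acked by the client, falling back to memcpy() if no
 * descriptor could be obtained.  "chan", "dst", "src" and "len" are
 * assumed from the caller's context; the issue_pending/complete helpers
 * are defined below.
 *
 *	dma_cookie_t cookie;
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (dma_submit_error(cookie)) {
 *		memcpy(dst, src, len);
 *	} else {
 *		dma_async_memcpy_issue_pending(chan);
 *		while (dma_async_memcpy_complete(chan, cookie, NULL, NULL)
 *				== DMA_IN_PROGRESS)
 *			cpu_relax();
 *	}
 */
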
static inline void
async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->ack = 1;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask); \
		(cap) < DMA_TX_TYPE_END; \
		(cap) = next_dma_cap((cap), (mask)))

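/*
 * Example (illustrative sketch): walking every capability advertised in
 * a mask, e.g. the cap_mask of a struct dma_device the caller holds.
 *
 *	enum dma_transaction_type cap;
 *
 *	for_each_dma_cap_mask(cap, device->cap_mask)
 *		printk(KERN_DEBUG "cap %d supported\n", cap);
 */
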
/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	return chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}

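/*
 * Example (illustrative sketch): one hardware query, many cookie tests.
 * "chan", "cookies[]", "n" and mark_done() are assumed (hypothetical)
 * caller context.
 *
 *	dma_cookie_t last, used;
 *	int i;
 *
 *	dma_async_is_tx_complete(chan, cookies[0], &last, &used);
 *	for (i = 0; i < n; i++)
 *		if (dma_async_is_complete(cookies[i], last, used)
 *				== DMA_SUCCESS)
 *			mark_done(i);
 */
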
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);

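/*
 * Example (illustrative sketch): pinning an iovec, DMA-copying kernel
 * data into it, then unpinning.  Error handling is elided; "chan",
 * "iov", "kdata" and "len" are assumed from the caller's context.
 *
 *	struct dma_pinned_list *pinned;
 *	dma_cookie_t cookie;
 *
 *	pinned = dma_pin_iovec_pages(iov, len);
 *	cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 *	dma_unpin_iovec_pages(pinned);
 */
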
#endif /* DMAENGINE_H */