/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/device.h>
#include <linux/uio.h>
#include <linux/kref.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>
#include <linux/dma-mapping.h>

/**
 * enum dma_state - resource PNP/power management state
 * @DMA_RESOURCE_SUSPEND: DMA device going into low power state
 * @DMA_RESOURCE_RESUME: DMA device returning to full power
 * @DMA_RESOURCE_AVAILABLE: DMA device available to the system
 * @DMA_RESOURCE_REMOVED: DMA device removed from the system
 */
enum dma_state {
	DMA_RESOURCE_SUSPEND,
	DMA_RESOURCE_RESUME,
	DMA_RESOURCE_AVAILABLE,
	DMA_RESOURCE_REMOVED,
};

/**
 * enum dma_state_client - state of the channel in the client
 * @DMA_ACK: client would like to use, or was using this channel
 * @DMA_DUP: client has already seen this channel, or is not using this channel
 * @DMA_NAK: client does not want to see any more channels
 */
enum dma_state_client {
	DMA_ACK,
	DMA_DUP,
	DMA_NAK,
};

/**
 * typedef dma_cookie_t - an opaque DMA cookie
 *
 * if dma_cookie_t is > 0 it's a DMA request cookie, if < 0 it's an error code
 */
typedef s32 dma_cookie_t;

#define dma_submit_error(cookie) ((cookie) < 0 ? 1 : 0)

/**
 * enum dma_status - DMA transaction status
 * @DMA_SUCCESS: transaction completed successfully
 * @DMA_IN_PROGRESS: transaction not yet processed
 * @DMA_ERROR: transaction failed
 */
enum dma_status {
	DMA_SUCCESS,
	DMA_IN_PROGRESS,
	DMA_ERROR,
};

/**
 * enum dma_transaction_type - DMA transaction types/indexes
 */
enum dma_transaction_type {
	DMA_MEMCPY,
	DMA_XOR,
	DMA_PQ_XOR,
	DMA_DUAL_XOR,
	DMA_PQ_UPDATE,
	DMA_ZERO_SUM,
	DMA_PQ_ZERO_SUM,
	DMA_MEMSET,
	DMA_MEMCPY_CRC32C,
	DMA_INTERRUPT,
};

/* last transaction type for creation of the capabilities mask */
#define DMA_TX_TYPE_END (DMA_INTERRUPT + 1)

/**
 * enum dma_prep_flags - DMA flags to augment operation preparation
 * @DMA_PREP_INTERRUPT: trigger an interrupt (callback) upon completion of
 *	this transaction
 */
enum dma_prep_flags {
	DMA_PREP_INTERRUPT = (1 << 0),
};

/**
 * dma_cap_mask_t - capabilities bitmap modeled after cpumask_t.
 * See linux/cpumask.h
 */
typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;

/**
 * struct dma_chan_percpu - the per-CPU part of struct dma_chan
 * @refcount: local_t used for open-coded "bigref" counting
 * @memcpy_count: transaction counter
 * @bytes_transferred: byte counter
 */
struct dma_chan_percpu {
	local_t refcount;
	/* stats */
	unsigned long memcpy_count;
	unsigned long bytes_transferred;
};

/**
 * struct dma_chan - devices supply DMA channels, clients use them
 * @device: ptr to the dma device that supplies this channel, always !%NULL
 * @cookie: last cookie value returned to client
 * @chan_id: channel ID for sysfs
 * @dev: class device for sysfs
 * @refcount: kref, used in "bigref" slow-mode
 * @slow_ref: indicates that the DMA channel is free
 * @rcu: the DMA channel's RCU head
 * @device_node: used to add this to the device chan list
 * @local: per-cpu pointer to a struct dma_chan_percpu
 */
struct dma_chan {
	struct dma_device *device;
	dma_cookie_t cookie;

	/* sysfs */
	int chan_id;
	struct device dev;

	struct kref refcount;
	int slow_ref;
	struct rcu_head rcu;

	struct list_head device_node;
	struct dma_chan_percpu *local;
};

#define to_dma_chan(p) container_of(p, struct dma_chan, dev)

void dma_chan_cleanup(struct kref *kref);

static inline void dma_chan_get(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_get(&chan->refcount);
	else {
		local_inc(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

static inline void dma_chan_put(struct dma_chan *chan)
{
	if (unlikely(chan->slow_ref))
		kref_put(&chan->refcount, dma_chan_cleanup);
	else {
		local_dec(&(per_cpu_ptr(chan->local, get_cpu())->refcount));
		put_cpu();
	}
}

/*
 * typedef dma_event_callback - function pointer to a DMA event callback
 * For each channel added to the system this routine is called for each client.
 * If the client would like to use the channel it returns DMA_ACK to signal
 * (ack) the dmaengine core to take out a reference on the channel and its
 * corresponding device. A client must not 'ack' an available channel more
 * than once. When a channel is removed all clients are notified. If a client
 * is using the channel it must 'ack' the removal. A client must not 'ack' a
 * removed channel more than once.
 * @client - 'this' pointer for the client context
 * @chan - channel to be acted upon
 * @state - available or removed
 */
struct dma_client;
typedef enum dma_state_client (*dma_event_callback) (struct dma_client *client,
		struct dma_chan *chan, enum dma_state state);

/**
 * struct dma_client - info on the entity making use of DMA services
 * @event_callback: func ptr to call when something happens
 * @cap_mask: only return channels that satisfy the requested capabilities;
 *	a value of zero corresponds to any capability
 * @global_node: list_head for global dma_client_list
 */
struct dma_client {
	dma_event_callback event_callback;
	dma_cap_mask_t cap_mask;
	struct list_head global_node;
};
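
/*
 * Example (illustrative sketch, not part of this interface): a minimal
 * client that asks for one memcpy-capable channel.  The names my_chan,
 * my_event and my_client below are hypothetical.
 *
 *	static struct dma_chan *my_chan;
 *
 *	static enum dma_state_client
 *	my_event(struct dma_client *client, struct dma_chan *chan,
 *		 enum dma_state state)
 *	{
 *		switch (state) {
 *		case DMA_RESOURCE_AVAILABLE:
 *			if (my_chan)
 *				return DMA_DUP;	// already have a channel
 *			my_chan = chan;
 *			return DMA_ACK;		// take a reference
 *		case DMA_RESOURCE_REMOVED:
 *			if (chan != my_chan)
 *				return DMA_DUP;
 *			my_chan = NULL;
 *			return DMA_ACK;		// drop our reference
 *		default:
 *			return DMA_DUP;
 *		}
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback = my_event,
 *	};
 *
 * and, from the client's init path:
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	dma_async_client_chan_request(&my_client);
 */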

typedef void (*dma_async_tx_callback)(void *dma_async_param);
/**
 * struct dma_async_tx_descriptor - async transaction descriptor
 * ---dma generic offload fields---
 * @cookie: tracking cookie for this transaction, set to -EBUSY if
 *	this tx is sitting on a dependency list
 * @ack: the descriptor cannot be reused until the client acknowledges
 *	receipt, i.e. has had a chance to establish any dependency chains
 * @phys: physical address of the descriptor
 * @tx_list: driver common field for operations that require multiple
 *	descriptors
 * @chan: target channel for this operation
 * @tx_submit: set the prepared descriptor(s) to be executed by the engine
 * @callback: routine to call after this operation is complete
 * @callback_param: general parameter to pass to the callback routine
 * ---async_tx api specific fields---
 * @depend_list: at completion this list of transactions is submitted
 * @depend_node: allow this transaction to be executed after another
 *	transaction has completed, possibly on another channel
 * @parent: pointer to the next level up in the dependency chain
 * @lock: protect the dependency list
 */
struct dma_async_tx_descriptor {
	dma_cookie_t cookie;
	int ack;
	dma_addr_t phys;
	struct list_head tx_list;
	struct dma_chan *chan;
	dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
	dma_async_tx_callback callback;
	void *callback_param;
	struct list_head depend_list;
	struct list_head depend_node;
	struct dma_async_tx_descriptor *parent;
	spinlock_t lock;
};

/**
 * struct dma_device - info on the entity supplying DMA services
 * @chancnt: how many DMA channels are supported
 * @channels: the list of struct dma_chan
 * @global_node: list_head for global dma_device_list
 * @cap_mask: one or more dma_capability flags
 * @max_xor: maximum number of xor sources, 0 if no capability
 * @refcount: reference count
 * @done: IO completion struct
 * @dev_id: unique device ID
 * @dev: struct device reference for dma mapping api
 * @device_alloc_chan_resources: allocate resources and return the
 *	number of allocated descriptors
 * @device_free_chan_resources: release DMA channel's resources
 * @device_prep_dma_memcpy: prepares a memcpy operation
 * @device_prep_dma_xor: prepares a xor operation
 * @device_prep_dma_zero_sum: prepares a zero_sum operation
 * @device_prep_dma_memset: prepares a memset operation
 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
 * @device_dependency_added: async_tx notifies the channel about new deps
 * @device_is_tx_complete: poll for transaction completion
 * @device_issue_pending: push pending transactions to hardware
 */
struct dma_device {

	unsigned int chancnt;
	struct list_head channels;
	struct list_head global_node;
	dma_cap_mask_t cap_mask;
	int max_xor;

	struct kref refcount;
	struct completion done;

	int dev_id;
	struct device *dev;

	int (*device_alloc_chan_resources)(struct dma_chan *chan);
	void (*device_free_chan_resources)(struct dma_chan *chan);

	struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_zero_sum)(
		struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt,
		size_t len, u32 *result, unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_memset)(
		struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
		unsigned long flags);
	struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
		struct dma_chan *chan);

	void (*device_dependency_added)(struct dma_chan *chan);
	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
			dma_cookie_t cookie, dma_cookie_t *last,
			dma_cookie_t *used);
	void (*device_issue_pending)(struct dma_chan *chan);
};
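
/*
 * Example (illustrative sketch): driving one memcpy through the raw
 * descriptor interface above.  @chan is a channel obtained via a
 * dma_client; src, dst, len, my_done and my_ctx are hypothetical.
 * Error handling (unmapping, falling back to memcpy()) is omitted.
 *
 *	struct dma_device *dev = chan->device;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_addr_t dma_src, dma_dst;
 *	dma_cookie_t cookie;
 *
 *	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 *	dma_dst = dma_map_single(dev->dev, dst, len, DMA_FROM_DEVICE);
 *
 *	tx = dev->device_prep_dma_memcpy(chan, dma_dst, dma_src, len,
 *					 DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		async_tx_ack(tx);		// no dependency chain needed
 *		tx->callback = my_done;		// called when the copy finishes
 *		tx->callback_param = my_ctx;
 *		cookie = tx->tx_submit(tx);
 *		if (!dma_submit_error(cookie))
 *			dma_async_issue_pending(chan);
 *	}
 */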

/* --- public DMA engine API --- */

void dma_async_client_register(struct dma_client *client);
void dma_async_client_unregister(struct dma_client *client);
void dma_async_client_chan_request(struct dma_client *client);
dma_cookie_t dma_async_memcpy_buf_to_buf(struct dma_chan *chan,
	void *dest, void *src, size_t len);
dma_cookie_t dma_async_memcpy_buf_to_pg(struct dma_chan *chan,
	struct page *page, unsigned int offset, void *kdata, size_t len);
dma_cookie_t dma_async_memcpy_pg_to_pg(struct dma_chan *chan,
	struct page *dest_pg, unsigned int dest_off, struct page *src_pg,
	unsigned int src_off, size_t len);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan);

static inline void
async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->ack = 1;
}

#define first_dma_cap(mask) __first_dma_cap(&(mask))
static inline int __first_dma_cap(const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_first_bit(srcp->bits, DMA_TX_TYPE_END));
}

#define next_dma_cap(n, mask) __next_dma_cap((n), &(mask))
static inline int __next_dma_cap(int n, const dma_cap_mask_t *srcp)
{
	return min_t(int, DMA_TX_TYPE_END,
		find_next_bit(srcp->bits, DMA_TX_TYPE_END, n+1));
}

#define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask))
static inline void
__dma_cap_set(enum dma_transaction_type tx_type, dma_cap_mask_t *dstp)
{
	set_bit(tx_type, dstp->bits);
}

#define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask))
static inline int
__dma_has_cap(enum dma_transaction_type tx_type, dma_cap_mask_t *srcp)
{
	return test_bit(tx_type, srcp->bits);
}

#define for_each_dma_cap_mask(cap, mask) \
	for ((cap) = first_dma_cap(mask); \
		(cap) < DMA_TX_TYPE_END; \
		(cap) = next_dma_cap((cap), (mask)))
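
/*
 * Example (illustrative sketch): inspecting a dma_device's capability mask
 * with the helpers above.  @device is a struct dma_device pointer.
 *
 *	enum dma_transaction_type cap;
 *
 *	if (dma_has_cap(DMA_MEMCPY, device->cap_mask))
 *		printk(KERN_INFO "dma: memcpy offload available\n");
 *
 *	for_each_dma_cap_mask(cap, device->cap_mask)
 *		printk(KERN_INFO "dma: supports transaction type %d\n", cap);
 */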

/**
 * dma_async_issue_pending - flush pending transactions to HW
 * @chan: target DMA channel
 *
 * This allows drivers to push copies to HW in batches,
 * reducing MMIO writes where possible.
 */
static inline void dma_async_issue_pending(struct dma_chan *chan)
{
	chan->device->device_issue_pending(chan);
}

#define dma_async_memcpy_issue_pending(chan) dma_async_issue_pending(chan)
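
/*
 * Example (illustrative sketch): batching several copies before touching
 * hardware.  dst[], src[], n and len are hypothetical; the copies are
 * kicked off with a single MMIO write at the end.
 *
 *	dma_cookie_t cookie;
 *	int i;
 *
 *	for (i = 0; i < n; i++) {
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst[i], src[i], len);
 *		if (dma_submit_error(cookie))
 *			memcpy(dst[i], src[i], len);	// synchronous fallback
 *	}
 *	dma_async_memcpy_issue_pending(chan);
 */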

/**
 * dma_async_is_tx_complete - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @last: returns last completed cookie, can be NULL
 * @used: returns last issued cookie, can be NULL
 *
 * If @last and @used are passed in, upon return they reflect the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	return chan->device->device_is_tx_complete(chan, cookie, last, used);
}

#define dma_async_memcpy_complete(chan, cookie, last, used)\
	dma_async_is_tx_complete(chan, cookie, last, used)

/**
 * dma_async_is_complete - test a cookie against chan state
 * @cookie: transaction identifier to test status of
 * @last_complete: last known completed transaction
 * @last_used: last cookie value handed out
 *
 * dma_async_is_complete() is used in dma_async_memcpy_complete();
 * the test logic is separated for lightweight testing of multiple cookies
 */
static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_SUCCESS;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_SUCCESS;
	}
	return DMA_IN_PROGRESS;
}
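
/*
 * Example (illustrative sketch): checking several outstanding cookies with
 * a single hardware query.  cookies[] and n are hypothetical; the values
 * were previously returned by the memcpy helpers above.
 *
 *	dma_cookie_t last, used;
 *	int i;
 *
 *	dma_async_memcpy_complete(chan, cookies[0], &last, &used);
 *	for (i = 0; i < n; i++)
 *		if (dma_async_is_complete(cookies[i], last, used) != DMA_SUCCESS)
 *			break;			// cookies[i] still in flight
 */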

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);

/* --- DMA device --- */

int dma_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
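
/*
 * Example (illustrative sketch): the driver-side registration path, heavily
 * compressed.  my_dev, my_chan, pdev and the my_* methods are hypothetical;
 * a real driver also sets up interrupts, descriptor rings, channel
 * resources, etc.
 *
 *	struct dma_device *dma = &my_dev->common;
 *
 *	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 *	dma->dev = &pdev->dev;
 *	dma->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dma->device_free_chan_resources = my_free_chan_resources;
 *	dma->device_prep_dma_memcpy = my_prep_memcpy;
 *	dma->device_is_tx_complete = my_is_tx_complete;
 *	dma->device_issue_pending = my_issue_pending;
 *	dma->device_dependency_added = my_dependency_added;
 *
 *	INIT_LIST_HEAD(&dma->channels);
 *	my_chan->device = dma;			// per-channel setup
 *	list_add_tail(&my_chan->device_node, &dma->channels);
 *	dma->chancnt = 1;
 *
 *	dma_async_device_register(dma);
 */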

/* --- Helper iov-locking functions --- */

struct dma_page_list {
	char *base_address;
	int nr_pages;
	struct page **pages;
};

struct dma_pinned_list {
	int nr_iovecs;
	struct dma_page_list page_list[0];
};

struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);

dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
	struct dma_pinned_list *pinned_list, struct page *page,
	unsigned int offset, size_t len);
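
/*
 * Example (illustrative sketch): copying a kernel buffer into a pinned
 * iovec with the helpers above.  iov, kdata and len are hypothetical.
 *
 *	struct dma_pinned_list *pinned;
 *	dma_cookie_t cookie;
 *
 *	pinned = dma_pin_iovec_pages(iov, len);
 *	if (pinned) {
 *		cookie = dma_memcpy_to_iovec(chan, iov, pinned, kdata, len);
 *		if (!dma_submit_error(cookie)) {
 *			dma_async_memcpy_issue_pending(chan);
 *			dma_sync_wait(chan, cookie);	// block until done
 *		}
 *		dma_unpin_iovec_pages(pinned);
 *	}
 */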

#endif /* DMAENGINE_H */