/*
 * xor offload engine api
 *
 * Copyright © 2006, Intel Corporation.
 *
 *	Dan Williams <dan.j.williams@intel.com>
 *
 *	with architecture considerations by:
 *	Neil Brown <neilb@suse.de>
 *	Jeff Garzik <jeff@garzik.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/raid/xor.h>
#include <linux/async_tx.h>

/* do_async_xor - dma map the pages and perform the xor with an engine.
 *	This routine is marked __always_inline so it can be compiled away
 *	when CONFIG_DMA_ENGINE=n
 */
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_device *device,
	struct dma_chan *chan, struct page *dest, struct page **src_list,
	unsigned int offset, unsigned int src_cnt, size_t len,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	dma_addr_t dma_dest;
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	struct dma_async_tx_descriptor *tx;
	int i;
	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

	pr_debug("%s: len: %zu\n", __FUNCTION__, len);

	dma_dest = dma_map_page(device->dev, dest, offset, len,
				DMA_FROM_DEVICE);

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);

	/* Since we have clobbered the src_list we are committed
	 * to doing this asynchronously.  Drivers force forward progress
	 * in case they can not provide a descriptor
	 */
	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
					 dma_prep_flags);
	if (!tx) {
		if (depend_tx)
			dma_wait_for_async_tx(depend_tx);

		while (!tx)
			tx = device->device_prep_dma_xor(chan, dma_dest,
							 dma_src, src_cnt, len,
							 dma_prep_flags);
	}

	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);

	return tx;
}
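
/*
 * Note on the compile-away claim above (a sketch of assumed behaviour,
 * not verbatim from async_tx.h): with CONFIG_DMA_ENGINE=n,
 * async_tx_find_channel() is expected to degenerate to a static inline
 * returning NULL, so the 'device' test in async_xor() below becomes
 * constant-false and the inlined body of do_async_xor() is eliminated
 * as dead code, roughly:
 *
 *	static inline struct dma_chan *
 *	async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
 *			      enum dma_transaction_type tx_type)
 *	{
 *		return NULL;
 *	}
 */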

static void
do_sync_xor(struct page *dest, struct page **src_list, unsigned int offset,
	unsigned int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	void *_dest;
	int i;

	pr_debug("%s: len: %zu\n", __FUNCTION__, len);

	/* reuse the 'src_list' array to convert to buffer pointers */
	for (i = 0; i < src_cnt; i++)
		src_list[i] = (struct page *)
			(page_address(src_list[i]) + offset);

	/* set destination address */
	_dest = page_address(dest) + offset;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		memset(_dest, 0, len);

	xor_blocks(src_cnt, len, _dest,
		(void **) src_list);

	async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
}

/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
 *	flag must be set to not include dest data in the calculation.  The
 *	assumption with dma engines is that they only use the destination
 *	buffer as a source when it is explicitly specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_async_tx_callback _cb_fn;
	void *_cb_param;
	unsigned long local_flags;
	int xor_src_cnt;
	int i = 0, src_off = 0;

	BUG_ON(src_cnt <= 1);

	while (src_cnt) {
		local_flags = flags;
		if (device) { /* run the xor asynchronously */
			xor_src_cnt = min(src_cnt, device->max_xor);
			/* if we are submitting additional xors
			 * only set the callback on the last transaction
			 */
			if (src_cnt > xor_src_cnt) {
				local_flags &= ~ASYNC_TX_ACK;
				_cb_fn = NULL;
				_cb_param = NULL;
			} else {
				_cb_fn = cb_fn;
				_cb_param = cb_param;
			}

			tx = do_async_xor(device, chan, dest,
					  &src_list[src_off], offset,
					  xor_src_cnt, len, local_flags,
					  depend_tx, _cb_fn, _cb_param);
		} else { /* run the xor synchronously */
			/* in the sync case the dest is an implied source
			 * (assumes the dest is at the src_off index)
			 */
			if (flags & ASYNC_TX_XOR_DROP_DST) {
				src_cnt--;
				src_off++;
			}

			/* process up to 'MAX_XOR_BLOCKS' sources */
			xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);

			/* if we are submitting additional xors
			 * only set the callback on the last transaction
			 */
			if (src_cnt > xor_src_cnt) {
				local_flags &= ~ASYNC_TX_ACK;
				_cb_fn = NULL;
				_cb_param = NULL;
			} else {
				_cb_fn = cb_fn;
				_cb_param = cb_param;
			}

			/* wait for any prerequisite operations */
			if (depend_tx) {
				/* if ack is already set then we cannot be sure
				 * we are referring to the correct operation
				 */
				BUG_ON(depend_tx->ack);
				if (dma_wait_for_async_tx(depend_tx) ==
					DMA_ERROR)
					panic("%s: DMA_ERROR waiting for "
						"depend_tx\n",
						__FUNCTION__);
			}

			do_sync_xor(dest, &src_list[src_off], offset,
				xor_src_cnt, len, local_flags, depend_tx,
				_cb_fn, _cb_param);
		}

		/* the previous tx is hidden from the client,
		 * so ack it
		 */
		if (i && depend_tx)
			async_tx_ack(depend_tx);

		depend_tx = tx;

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;

			/* unconditionally preserve the destination */
			flags &= ~ASYNC_TX_XOR_ZERO_DST;

			/* use the intermediate result as a source, but remember
			 * it's dropped, because it's implied, in the sync case
			 */
			src_list[--src_off] = dest;
			flags |= ASYNC_TX_XOR_DROP_DST;
		} else
			src_cnt = 0;
		i++;
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_xor);
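
/*
 * Example usage (an illustrative sketch only, not part of this file):
 * computing raid5-style parity over three data pages.  The names
 * 'parity', 'data_pages', 'parity_done', and 'stripe' are hypothetical.
 *
 *	static void parity_done(void *stripe)
 *	{
 *		pr_debug("parity update complete for %p\n", stripe);
 *	}
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	// start a fresh parity calculation: zero the destination first,
 *	// then xor the three sources into it and fire the callback
 *	tx = async_xor(parity, data_pages, 0, 3, PAGE_SIZE,
 *		       ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
 *		       NULL, parity_done, stripe);
 */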

/* zero iff the first word is zero and each later byte matches the byte 4 back */
static int page_is_zero(struct page *p, unsigned int offset, size_t len)
{
	char *a = page_address(p) + offset;
	return ((*(u32 *) a) == 0 &&
		memcmp(a, a + 4, len - 4) == 0);
}

/**
 * async_xor_zero_sum - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages.  The dest page must be listed as a source
 *	at index zero.  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
	unsigned int offset, int src_cnt, size_t len,
	u32 *result, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	BUG_ON(src_cnt <= 1);

	if (device) {
		dma_addr_t *dma_src = (dma_addr_t *) src_list;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __FUNCTION__, len);

		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
						      len, result, dma_prep_flags);
		if (!tx) {
			if (depend_tx)
				dma_wait_for_async_tx(depend_tx);

			while (!tx)
				tx = device->device_prep_dma_zero_sum(chan,
					dma_src, src_cnt, len, result,
					dma_prep_flags);
		}

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		unsigned long xor_flags = flags;

		pr_debug("%s: (sync) len: %zu\n", __FUNCTION__, len);

		xor_flags |= ASYNC_TX_XOR_DROP_DST;
		xor_flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
			depend_tx, NULL, NULL);

		if (tx) {
			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for tx\n",
					__FUNCTION__);
			async_tx_ack(tx);
		}

		*result = page_is_zero(dest, offset, len) ? 0 : 1;
		tx = NULL;

		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
	}

	return tx;
}
EXPORT_SYMBOL_GPL(async_xor_zero_sum);
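
/*
 * Example usage (an illustrative sketch only): verifying that a stripe's
 * blocks, with the parity block at index zero of 'blocks', xor to zero.
 * 'blocks', 'check_done', and 'stripe' are hypothetical names.
 *
 *	u32 result;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = async_xor_zero_sum(blocks[0], blocks, 0, 4, PAGE_SIZE,
 *				&result, ASYNC_TX_ACK,
 *				NULL, check_done, stripe);
 *
 * By the time check_done() runs, result == 0 means the xor sum was zero,
 * i.e. parity is consistent.
 */
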
static int __init async_xor_init(void)
{
#ifdef CONFIG_DMA_ENGINE
	/* To conserve stack space the input src_list (array of page pointers)
	 * is reused to hold the array of dma addresses passed to the driver.
	 * This conversion is only possible when dma_addr_t is no larger than
	 * the size of a pointer.  HIGHMEM64G is known to violate this
	 * assumption.
	 */
	BUILD_BUG_ON(sizeof(dma_addr_t) > sizeof(struct page *));
#endif

	return 0;
}

static void __exit async_xor_exit(void)
{
	do { } while (0);
}

module_init(async_xor_init);
module_exit(async_xor_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("asynchronous xor/xor-zero-sum api");
MODULE_LICENSE("GPL");