Commit | Line | Data |
---|---|---|
743e1c8f AP |
1 | /* |
2 | * Copyright (C) 2017 Broadcom | |
3 | * | |
d5c33487 AP |
4 | * This program is free software; you can redistribute it and/or |
5 | * modify it under the terms of the GNU General Public License as | |
6 | * published by the Free Software Foundation version 2. | |
7 | * | |
8 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | |
9 | * kind, whether express or implied; without even the implied warranty | |
10 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
11 | * GNU General Public License for more details. | |
743e1c8f AP |
12 | */ |
13 | ||
14 | /* | |
15 | * Broadcom SBA RAID Driver | |
16 | * | |
17 | * The Broadcom stream buffer accelerator (SBA) provides offloading | |
18 | * capabilities for RAID operations. The SBA offload engine is accessible | |
19 | * via the Broadcom SoC specific ring manager. Two or more offload | |
20 | * engines can share the same ring manager; because of this, the ring | |
21 | * manager driver is implemented as a mailbox controller driver and the | |
22 | * offload engine drivers are implemented as mailbox clients. | |
23 | * | |
24 | * Typically, the Broadcom SoC specific ring manager implements a large | |
25 | * number of hardware rings over one or more SBA hardware devices. By | |
26 | * design, the internal buffer size of an SBA hardware device is limited, | |
27 | * but all offload operations supported by SBA can be broken down into | |
28 | * multiple small-size requests and executed in parallel on multiple SBA | |
29 | * hardware devices to achieve high throughput. | |
30 | * | |
31 | * The Broadcom SBA RAID driver does not require any register programming | |
32 | * except for submitting requests to the SBA hardware device via mailbox channels. | |
4e9f8187 AP |
33 | * This driver implements a DMA device with one DMA channel using a single |
34 | * mailbox channel provided by the Broadcom SoC specific ring manager driver. | |
743e1c8f AP |
35 | * To have more SBA DMA channels, we can create more SBA device nodes | |
36 | * in the Broadcom SoC specific DTS, based on the number of hardware | |
37 | * rings supported by the Broadcom SoC ring manager. | |
38 | */ | |
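/*
 * Illustration only: a hypothetical SBA device node wired to a ring
 * manager mailbox channel in DTS. The node name and mbox specifier
 * below are examples, not copied from an actual SoC dtsi:
 *
 *	raid0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox 0>;
 *	};
 */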
39 | ||
40 | #include <linux/bitops.h> | |
8529a927 | 41 | #include <linux/debugfs.h> |
743e1c8f AP |
42 | #include <linux/dma-mapping.h> |
43 | #include <linux/dmaengine.h> | |
44 | #include <linux/list.h> | |
45 | #include <linux/mailbox_client.h> | |
46 | #include <linux/mailbox/brcm-message.h> | |
47 | #include <linux/module.h> | |
48 | #include <linux/of_device.h> | |
49 | #include <linux/slab.h> | |
50 | #include <linux/raid/pq.h> | |
51 | ||
52 | #include "dmaengine.h" | |
53 | ||
e897091a AP |
54 | /* ====== Driver macros and defines ====== */ |
55 | ||
743e1c8f AP |
56 | #define SBA_TYPE_SHIFT 48 |
57 | #define SBA_TYPE_MASK GENMASK(1, 0) | |
58 | #define SBA_TYPE_A 0x0 | |
59 | #define SBA_TYPE_B 0x2 | |
60 | #define SBA_TYPE_C 0x3 | |
61 | #define SBA_USER_DEF_SHIFT 32 | |
62 | #define SBA_USER_DEF_MASK GENMASK(15, 0) | |
63 | #define SBA_R_MDATA_SHIFT 24 | |
64 | #define SBA_R_MDATA_MASK GENMASK(7, 0) | |
65 | #define SBA_C_MDATA_MS_SHIFT 18 | |
66 | #define SBA_C_MDATA_MS_MASK GENMASK(1, 0) | |
67 | #define SBA_INT_SHIFT 17 | |
68 | #define SBA_INT_MASK BIT(0) | |
69 | #define SBA_RESP_SHIFT 16 | |
70 | #define SBA_RESP_MASK BIT(0) | |
71 | #define SBA_C_MDATA_SHIFT 8 | |
72 | #define SBA_C_MDATA_MASK GENMASK(7, 0) | |
73 | #define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum)) | |
74 | #define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0) | |
75 | #define SBA_C_MDATA_DNUM_SHIFT 5 | |
76 | #define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0) | |
77 | #define SBA_C_MDATA_LS(__v) ((__v) & 0xff) | |
78 | #define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3) | |
79 | #define SBA_CMD_SHIFT 0 | |
80 | #define SBA_CMD_MASK GENMASK(3, 0) | |
81 | #define SBA_CMD_ZERO_BUFFER 0x4 | |
82 | #define SBA_CMD_ZERO_ALL_BUFFERS 0x8 | |
83 | #define SBA_CMD_LOAD_BUFFER 0x9 | |
84 | #define SBA_CMD_XOR 0xa | |
85 | #define SBA_CMD_GALOIS_XOR 0xb | |
86 | #define SBA_CMD_WRITE_BUFFER 0xc | |
87 | #define SBA_CMD_GALOIS 0xe | |
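/*
 * Taken together, the macros above imply the following layout of the
 * 64-bit SBA command word (bits not listed are unused by this driver):
 *
 *	[49:48] TYPE        [47:32] USER_DEF   [31:24] R_MDATA
 *	[19:18] C_MDATA_MS  [17]    INT        [16]    RESP
 *	[15:8]  C_MDATA     [3:0]   CMD
 */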
88 | ||
5346aafc | 89 | #define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 |
4e9f8187 | 90 | #define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8 |
5346aafc | 91 | |
743e1c8f AP |
92 | /* Driver helper macros */ |
93 | #define to_sba_request(tx) \ | |
94 | container_of(tx, struct sba_request, tx) | |
95 | #define to_sba_device(dchan) \ | |
96 | container_of(dchan, struct sba_device, dma_chan) | |
97 | ||
e897091a AP |
98 | /* ====== Driver data structures ====== */ |
99 | ||
57a28508 AP |
100 | enum sba_request_flags { |
101 | SBA_REQUEST_STATE_FREE = 0x001, | |
102 | SBA_REQUEST_STATE_ALLOCED = 0x002, | |
103 | SBA_REQUEST_STATE_PENDING = 0x004, | |
104 | SBA_REQUEST_STATE_ACTIVE = 0x008, | |
ecbf9ef1 | 105 | SBA_REQUEST_STATE_ABORTED = 0x010, |
57a28508 AP |
106 | SBA_REQUEST_STATE_MASK = 0x0ff, |
107 | SBA_REQUEST_FENCE = 0x100, | |
743e1c8f AP |
108 | }; |
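/*
 * Request lifecycle, as implemented by the helper routines below:
 * FREE -> ALLOCED (sba_alloc_request) -> PENDING (sba_tx_submit) ->
 * ACTIVE (_sba_process_pending_requests) -> FREE again once the
 * mailbox completion is processed (sba_process_received_request).
 * Requests still active at channel cleanup go ACTIVE -> ABORTED and
 * are freed when their mailbox completion eventually arrives.
 */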
109 | ||
110 | struct sba_request { | |
111 | /* Global state */ | |
112 | struct list_head node; | |
113 | struct sba_device *sba; | |
57a28508 | 114 | u32 flags; |
743e1c8f AP |
115 | /* Chained requests management */ |
116 | struct sba_request *first; | |
117 | struct list_head next; | |
743e1c8f AP |
118 | atomic_t next_pending_count; |
119 | /* BRCM message data */ | |
743e1c8f AP |
120 | struct brcm_message msg; |
121 | struct dma_async_tx_descriptor tx; | |
5655e00f AP |
122 | /* SBA commands */ |
123 | struct brcm_sba_command cmds[]; | |
743e1c8f AP |
124 | }; |
125 | ||
126 | enum sba_version { | |
127 | SBA_VER_1 = 0, | |
128 | SBA_VER_2 | |
129 | }; | |
130 | ||
131 | struct sba_device { | |
132 | /* Underlying device */ | |
133 | struct device *dev; | |
134 | /* DT configuration parameters */ | |
135 | enum sba_version ver; | |
136 | /* Derived configuration parameters */ | |
137 | u32 max_req; | |
138 | u32 hw_buf_size; | |
139 | u32 hw_resp_size; | |
140 | u32 max_pq_coefs; | |
141 | u32 max_pq_srcs; | |
142 | u32 max_cmd_per_req; | |
143 | u32 max_xor_srcs; | |
144 | u32 max_resp_pool_size; | |
145 | u32 max_cmds_pool_size; | |
146 | /* Mailbox client and mailbox channels */ | |
147 | struct mbox_client client; | |
4e9f8187 | 148 | struct mbox_chan *mchan; |
743e1c8f AP |
149 | struct device *mbox_dev; |
150 | /* DMA device and DMA channel */ | |
151 | struct dma_device dma_dev; | |
152 | struct dma_chan dma_chan; | |
153 | /* DMA channel resources */ | |
154 | void *resp_base; | |
155 | dma_addr_t resp_dma_base; | |
156 | void *cmds_base; | |
157 | dma_addr_t cmds_dma_base; | |
158 | spinlock_t reqs_lock; | |
743e1c8f AP |
159 | bool reqs_fence; |
160 | struct list_head reqs_alloc_list; | |
161 | struct list_head reqs_pending_list; | |
162 | struct list_head reqs_active_list; | |
743e1c8f AP |
163 | struct list_head reqs_aborted_list; |
164 | struct list_head reqs_free_list; | |
8529a927 AP |
165 | /* DebugFS directory entries */ |
166 | struct dentry *root; | |
743e1c8f AP |
167 | }; |
168 | ||
e897091a | 169 | /* ====== Command helper routines ====== */ |
743e1c8f AP |
170 | |
171 | static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask) | |
172 | { | |
173 | cmd &= ~((u64)mask << shift); | |
174 | cmd |= ((u64)(val & mask) << shift); | |
175 | return cmd; | |
176 | } | |
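/*
 * Commands are built by folding in one field at a time. For example,
 * the Type-B LOAD_BUFFER command used by the sba_fillup_*_msg()
 * routines below is encoded as:
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, msg_len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), SBA_C_MDATA_SHIFT,
 *			  SBA_C_MDATA_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
 */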
177 | ||
178 | static inline u32 __pure sba_cmd_load_c_mdata(u32 b0) | |
179 | { | |
180 | return b0 & SBA_C_MDATA_BNUMx_MASK; | |
181 | } | |
182 | ||
183 | static inline u32 __pure sba_cmd_write_c_mdata(u32 b0) | |
184 | { | |
185 | return b0 & SBA_C_MDATA_BNUMx_MASK; | |
186 | } | |
187 | ||
188 | static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0) | |
189 | { | |
190 | return (b0 & SBA_C_MDATA_BNUMx_MASK) | | |
191 | ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)); | |
192 | } | |
193 | ||
194 | static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0) | |
195 | { | |
196 | return (b0 & SBA_C_MDATA_BNUMx_MASK) | | |
197 | ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) | | |
198 | ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT); | |
199 | } | |
200 | ||
e897091a | 201 | /* ====== General helper routines ====== */ |
743e1c8f AP |
202 | |
203 | static struct sba_request *sba_alloc_request(struct sba_device *sba) | |
204 | { | |
ecbf9ef1 | 205 | bool found = false; |
743e1c8f AP |
206 | unsigned long flags; |
207 | struct sba_request *req = NULL; | |
208 | ||
209 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
ecbf9ef1 AP |
210 | list_for_each_entry(req, &sba->reqs_free_list, node) { |
211 | if (async_tx_test_ack(&req->tx)) { | |
212 | list_move_tail(&req->node, &sba->reqs_alloc_list); | |
213 | found = true; | |
214 | break; | |
215 | } | |
216 | } | |
743e1c8f | 217 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
6df8f913 | 218 | |
ecbf9ef1 | 219 | if (!found) { |
6df8f913 AP |
220 | /* |
221 | * We have no more free requests, so we peek the | |
222 | * mailbox channel hoping a few active requests | |
223 | * have completed, which will create more room | |
224 | * for new requests. | |
225 | */ | |
4e9f8187 | 226 | mbox_client_peek_data(sba->mchan); |
e4274cfa | 227 | return NULL; |
6df8f913 | 228 | } |
e4274cfa | 229 | |
57a28508 | 230 | req->flags = SBA_REQUEST_STATE_ALLOCED; |
e4274cfa AP |
231 | req->first = req; |
232 | INIT_LIST_HEAD(&req->next); | |
e4274cfa AP |
233 | atomic_set(&req->next_pending_count, 1); |
234 | ||
235 | dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan); | |
fd8eb539 | 236 | async_tx_ack(&req->tx); |
743e1c8f AP |
237 | |
238 | return req; | |
239 | } | |
240 | ||
241 | /* Note: Must be called with sba->reqs_lock held */ | |
242 | static void _sba_pending_request(struct sba_device *sba, | |
243 | struct sba_request *req) | |
244 | { | |
245 | lockdep_assert_held(&sba->reqs_lock); | |
57a28508 AP |
246 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
247 | req->flags |= SBA_REQUEST_STATE_PENDING; | |
743e1c8f AP |
248 | list_move_tail(&req->node, &sba->reqs_pending_list); |
249 | if (list_empty(&sba->reqs_active_list)) | |
250 | sba->reqs_fence = false; | |
251 | } | |
252 | ||
253 | /* Note: Must be called with sba->reqs_lock held */ | |
254 | static bool _sba_active_request(struct sba_device *sba, | |
255 | struct sba_request *req) | |
256 | { | |
257 | lockdep_assert_held(&sba->reqs_lock); | |
258 | if (list_empty(&sba->reqs_active_list)) | |
259 | sba->reqs_fence = false; | |
260 | if (sba->reqs_fence) | |
261 | return false; | |
57a28508 AP |
262 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
263 | req->flags |= SBA_REQUEST_STATE_ACTIVE; | |
743e1c8f | 264 | list_move_tail(&req->node, &sba->reqs_active_list); |
57a28508 | 265 | if (req->flags & SBA_REQUEST_FENCE) |
743e1c8f AP |
266 | sba->reqs_fence = true; |
267 | return true; | |
268 | } | |
269 | ||
270 | /* Note: Must be called with sba->reqs_lock held */ | |
271 | static void _sba_abort_request(struct sba_device *sba, | |
272 | struct sba_request *req) | |
273 | { | |
274 | lockdep_assert_held(&sba->reqs_lock); | |
57a28508 AP |
275 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
276 | req->flags |= SBA_REQUEST_STATE_ABORTED; | |
743e1c8f AP |
277 | list_move_tail(&req->node, &sba->reqs_aborted_list); |
278 | if (list_empty(&sba->reqs_active_list)) | |
279 | sba->reqs_fence = false; | |
280 | } | |
281 | ||
282 | /* Note: Must be called with sba->reqs_lock held */ | |
283 | static void _sba_free_request(struct sba_device *sba, | |
284 | struct sba_request *req) | |
285 | { | |
286 | lockdep_assert_held(&sba->reqs_lock); | |
57a28508 AP |
287 | req->flags &= ~SBA_REQUEST_STATE_MASK; |
288 | req->flags |= SBA_REQUEST_STATE_FREE; | |
743e1c8f AP |
289 | list_move_tail(&req->node, &sba->reqs_free_list); |
290 | if (list_empty(&sba->reqs_active_list)) | |
291 | sba->reqs_fence = false; | |
743e1c8f AP |
292 | } |
293 | ||
743e1c8f AP |
294 | static void sba_free_chained_requests(struct sba_request *req) |
295 | { | |
296 | unsigned long flags; | |
297 | struct sba_request *nreq; | |
298 | struct sba_device *sba = req->sba; | |
299 | ||
300 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
301 | ||
302 | _sba_free_request(sba, req); | |
303 | list_for_each_entry(nreq, &req->next, next) | |
304 | _sba_free_request(sba, nreq); | |
305 | ||
306 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | |
307 | } | |
308 | ||
309 | static void sba_chain_request(struct sba_request *first, | |
310 | struct sba_request *req) | |
311 | { | |
312 | unsigned long flags; | |
313 | struct sba_device *sba = req->sba; | |
314 | ||
315 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
316 | ||
317 | list_add_tail(&req->next, &first->next); | |
318 | req->first = first; | |
10f1a330 | 319 | atomic_inc(&first->next_pending_count); |
743e1c8f AP |
320 | |
321 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | |
322 | } | |
323 | ||
324 | static void sba_cleanup_nonpending_requests(struct sba_device *sba) | |
325 | { | |
326 | unsigned long flags; | |
327 | struct sba_request *req, *req1; | |
328 | ||
329 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
330 | ||
331 | /* Free up all allocated requests */ | |
332 | list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node) | |
333 | _sba_free_request(sba, req); | |
334 | ||
743e1c8f AP |
335 | /* Set all active requests as aborted */ |
336 | list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node) | |
337 | _sba_abort_request(sba, req); | |
338 | ||
339 | /* | |
340 | * Note: We expect that aborted requests will eventually be | |
341 | * freed by sba_receive_message() | |
342 | */ | |
343 | ||
344 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | |
345 | } | |
346 | ||
347 | static void sba_cleanup_pending_requests(struct sba_device *sba) | |
348 | { | |
349 | unsigned long flags; | |
350 | struct sba_request *req, *req1; | |
351 | ||
352 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
353 | ||
354 | /* Free up all pending requests */ | |
355 | list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) | |
356 | _sba_free_request(sba, req); | |
357 | ||
358 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | |
359 | } | |
360 | ||
743e1c8f AP |
361 | static int sba_send_mbox_request(struct sba_device *sba, |
362 | struct sba_request *req) | |
363 | { | |
4e9f8187 | 364 | int ret = 0; |
743e1c8f AP |
365 | |
366 | /* Send message for the request */ | |
367 | req->msg.error = 0; | |
4e9f8187 | 368 | ret = mbox_send_message(sba->mchan, &req->msg); |
743e1c8f AP |
369 | if (ret < 0) { |
370 | dev_err(sba->dev, "send message failed with error %d", ret); | |
371 | return ret; | |
372 | } | |
29e0f486 AP |
373 | |
374 | /* Check error returned by mailbox controller */ | |
743e1c8f AP |
375 | ret = req->msg.error; |
376 | if (ret < 0) { | |
377 | dev_err(sba->dev, "message error %d", ret); | |
743e1c8f AP |
378 | } |
379 | ||
29e0f486 | 380 | /* Signal txdone for mailbox channel */ |
4e9f8187 | 381 | mbox_client_txdone(sba->mchan, ret); |
29e0f486 AP |
382 | |
383 | return ret; | |
743e1c8f AP |
384 | } |
385 | ||
d6ffd239 AP |
386 | /* Note: Must be called with sba->reqs_lock held */ |
387 | static void _sba_process_pending_requests(struct sba_device *sba) | |
743e1c8f AP |
388 | { |
389 | int ret; | |
f8338514 | 390 | u32 count; |
f8338514 | 391 | struct sba_request *req; |
f8338514 | 392 | |
4e9f8187 AP |
393 | /* Process a few pending requests */ | |
394 | count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL; | |
f8338514 AP |
395 | while (!list_empty(&sba->reqs_pending_list) && count) { |
396 | /* Get the first pending request */ | |
397 | req = list_first_entry(&sba->reqs_pending_list, | |
398 | struct sba_request, node); | |
399 | ||
743e1c8f AP |
400 | /* Try to make request active */ |
401 | if (!_sba_active_request(sba, req)) | |
402 | break; | |
403 | ||
404 | /* Send request to mailbox channel */ | |
743e1c8f | 405 | ret = sba_send_mbox_request(sba, req); |
743e1c8f AP |
406 | if (ret < 0) { |
407 | _sba_pending_request(sba, req); | |
408 | break; | |
409 | } | |
f8338514 AP |
410 | |
411 | count--; | |
412 | } | |
d6ffd239 | 413 | } |
f8338514 | 414 | |
d6ffd239 AP |
415 | static void sba_process_received_request(struct sba_device *sba, |
416 | struct sba_request *req) | |
417 | { | |
418 | unsigned long flags; | |
419 | struct dma_async_tx_descriptor *tx; | |
420 | struct sba_request *nreq, *first = req->first; | |
f8338514 | 421 | |
d6ffd239 AP |
422 | /* Process only after all chained requests are received */ |
423 | if (!atomic_dec_return(&first->next_pending_count)) { | |
424 | tx = &first->tx; | |
f8338514 AP |
425 | |
426 | WARN_ON(tx->cookie < 0); | |
427 | if (tx->cookie > 0) { | |
5d74aa7f | 428 | spin_lock_irqsave(&sba->reqs_lock, flags); |
f8338514 | 429 | dma_cookie_complete(tx); |
5d74aa7f | 430 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
f8338514 AP |
431 | dmaengine_desc_get_callback_invoke(tx, NULL); |
432 | dma_descriptor_unmap(tx); | |
433 | tx->callback = NULL; | |
434 | tx->callback_result = NULL; | |
435 | } | |
436 | ||
437 | dma_run_dependencies(tx); | |
438 | ||
439 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
440 | ||
d6ffd239 AP |
441 | /* Free all requests chained to the first request */ | |
442 | list_for_each_entry(nreq, &first->next, next) | |
443 | _sba_free_request(sba, nreq); | |
444 | INIT_LIST_HEAD(&first->next); | |
f8338514 | 445 | |
ecbf9ef1 AP |
446 | /* Free the first request */ |
447 | _sba_free_request(sba, first); | |
f8338514 | 448 | |
d6ffd239 AP |
449 | /* Process pending requests */ |
450 | _sba_process_pending_requests(sba); | |
f8338514 | 451 | |
d6ffd239 AP |
452 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
453 | } | |
f8338514 AP |
454 | } |
455 | ||
8529a927 AP |
456 | static void sba_write_stats_in_seqfile(struct sba_device *sba, |
457 | struct seq_file *file) | |
458 | { | |
459 | unsigned long flags; | |
460 | struct sba_request *req; | |
ecbf9ef1 AP |
461 | u32 free_count = 0, alloced_count = 0; |
462 | u32 pending_count = 0, active_count = 0, aborted_count = 0; | |
8529a927 AP |
463 | |
464 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
465 | ||
466 | list_for_each_entry(req, &sba->reqs_free_list, node) | |
ecbf9ef1 AP |
467 | if (async_tx_test_ack(&req->tx)) |
468 | free_count++; | |
8529a927 AP |
469 | |
470 | list_for_each_entry(req, &sba->reqs_alloc_list, node) | |
471 | alloced_count++; | |
472 | ||
473 | list_for_each_entry(req, &sba->reqs_pending_list, node) | |
474 | pending_count++; | |
475 | ||
476 | list_for_each_entry(req, &sba->reqs_active_list, node) | |
477 | active_count++; | |
478 | ||
479 | list_for_each_entry(req, &sba->reqs_aborted_list, node) | |
480 | aborted_count++; | |
481 | ||
8529a927 AP |
482 | spin_unlock_irqrestore(&sba->reqs_lock, flags); |
483 | ||
484 | seq_printf(file, "maximum requests = %d\n", sba->max_req); | |
485 | seq_printf(file, "free requests = %d\n", free_count); | |
486 | seq_printf(file, "alloced requests = %d\n", alloced_count); | |
487 | seq_printf(file, "pending requests = %d\n", pending_count); | |
488 | seq_printf(file, "active requests = %d\n", active_count); | |
489 | seq_printf(file, "aborted requests = %d\n", aborted_count); | |
8529a927 AP |
490 | } |
491 | ||
f8338514 AP |
492 | /* ====== DMAENGINE callbacks ====== */ |
493 | ||
494 | static void sba_free_chan_resources(struct dma_chan *dchan) | |
495 | { | |
496 | /* | |
497 | * Channel resources are pre-allocated, so we just free up | |
498 | * whatever we can, allowing the pre-allocated channel | |
499 | * resources to be re-used next time. | |
500 | */ | |
501 | sba_cleanup_nonpending_requests(to_sba_device(dchan)); | |
502 | } | |
503 | ||
504 | static int sba_device_terminate_all(struct dma_chan *dchan) | |
505 | { | |
506 | /* Cleanup all pending requests */ | |
507 | sba_cleanup_pending_requests(to_sba_device(dchan)); | |
508 | ||
509 | return 0; | |
510 | } | |
511 | ||
512 | static void sba_issue_pending(struct dma_chan *dchan) | |
513 | { | |
d6ffd239 | 514 | unsigned long flags; |
f8338514 AP |
515 | struct sba_device *sba = to_sba_device(dchan); |
516 | ||
d6ffd239 AP |
517 | /* Process pending requests */ |
518 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
519 | _sba_process_pending_requests(sba); | |
520 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | |
f8338514 AP |
521 | } |
522 | ||
743e1c8f AP |
523 | static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx) |
524 | { | |
525 | unsigned long flags; | |
526 | dma_cookie_t cookie; | |
527 | struct sba_device *sba; | |
528 | struct sba_request *req, *nreq; | |
529 | ||
530 | if (unlikely(!tx)) | |
531 | return -EINVAL; | |
532 | ||
533 | sba = to_sba_device(tx->chan); | |
534 | req = to_sba_request(tx); | |
535 | ||
536 | /* Assign cookie and mark all chained requests pending */ | |
537 | spin_lock_irqsave(&sba->reqs_lock, flags); | |
538 | cookie = dma_cookie_assign(tx); | |
539 | _sba_pending_request(sba, req); | |
540 | list_for_each_entry(nreq, &req->next, next) | |
541 | _sba_pending_request(sba, nreq); | |
542 | spin_unlock_irqrestore(&sba->reqs_lock, flags); | |
543 | ||
544 | return cookie; | |
545 | } | |
546 | ||
547 | static enum dma_status sba_tx_status(struct dma_chan *dchan, | |
548 | dma_cookie_t cookie, | |
549 | struct dma_tx_state *txstate) | |
550 | { | |
743e1c8f AP |
551 | enum dma_status ret; |
552 | struct sba_device *sba = to_sba_device(dchan); | |
553 | ||
743e1c8f AP |
554 | ret = dma_cookie_status(dchan, cookie, txstate); |
555 | if (ret == DMA_COMPLETE) | |
556 | return ret; | |
557 | ||
4e9f8187 | 558 | mbox_client_peek_data(sba->mchan); |
6df8f913 | 559 | |
743e1c8f AP |
560 | return dma_cookie_status(dchan, cookie, txstate); |
561 | } | |
562 | ||
563 | static void sba_fillup_interrupt_msg(struct sba_request *req, | |
564 | struct brcm_sba_command *cmds, | |
565 | struct brcm_message *msg) | |
566 | { | |
567 | u64 cmd; | |
568 | u32 c_mdata; | |
e7ae72aa | 569 | dma_addr_t resp_dma = req->tx.phys; |
743e1c8f AP |
570 | struct brcm_sba_command *cmdsp = cmds; |
571 | ||
572 | /* Type-B command to load dummy data into buf0 */ | |
573 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
574 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
575 | cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, | |
576 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
577 | c_mdata = sba_cmd_load_c_mdata(0); | |
578 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
579 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
580 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, | |
581 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
582 | cmdsp->cmd = cmd; | |
583 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
584 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
e7ae72aa | 585 | cmdsp->data = resp_dma; |
743e1c8f AP |
586 | cmdsp->data_len = req->sba->hw_resp_size; |
587 | cmdsp++; | |
588 | ||
589 | /* Type-A command to write buf0 to dummy location */ | |
590 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
591 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
592 | cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size, | |
593 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
594 | cmd = sba_cmd_enc(cmd, 0x1, | |
595 | SBA_RESP_SHIFT, SBA_RESP_MASK); | |
596 | c_mdata = sba_cmd_write_c_mdata(0); | |
597 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
598 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
599 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, | |
600 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
601 | cmdsp->cmd = cmd; | |
602 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
603 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
604 | if (req->sba->hw_resp_size) { | |
605 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | |
e7ae72aa | 606 | cmdsp->resp = resp_dma; |
743e1c8f AP |
607 | cmdsp->resp_len = req->sba->hw_resp_size; |
608 | } | |
609 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | |
e7ae72aa | 610 | cmdsp->data = resp_dma; |
743e1c8f AP |
611 | cmdsp->data_len = req->sba->hw_resp_size; |
612 | cmdsp++; | |
613 | ||
614 | /* Fill up the brcm_message */ | |
615 | msg->type = BRCM_MESSAGE_SBA; | |
616 | msg->sba.cmds = cmds; | |
617 | msg->sba.cmds_count = cmdsp - cmds; | |
618 | msg->ctx = req; | |
619 | msg->error = 0; | |
620 | } | |
621 | ||
622 | static struct dma_async_tx_descriptor * | |
623 | sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags) | |
624 | { | |
625 | struct sba_request *req = NULL; | |
626 | struct sba_device *sba = to_sba_device(dchan); | |
627 | ||
628 | /* Alloc new request */ | |
629 | req = sba_alloc_request(sba); | |
630 | if (!req) | |
631 | return NULL; | |
632 | ||
633 | /* | |
634 | * Force fence so that no requests are submitted | |
635 | * until DMA callback for this request is invoked. | |
636 | */ | |
57a28508 | 637 | req->flags |= SBA_REQUEST_FENCE; |
743e1c8f AP |
638 | |
639 | /* Fill up the request message */ | |
640 | sba_fillup_interrupt_msg(req, req->cmds, &req->msg); | |
641 | ||
642 | /* Init async_tx descriptor */ | |
643 | req->tx.flags = flags; | |
644 | req->tx.cookie = -EBUSY; | |
645 | ||
1fc63cb4 | 646 | return &req->tx; |
743e1c8f AP |
647 | } |
648 | ||
649 | static void sba_fillup_memcpy_msg(struct sba_request *req, | |
650 | struct brcm_sba_command *cmds, | |
651 | struct brcm_message *msg, | |
652 | dma_addr_t msg_offset, size_t msg_len, | |
653 | dma_addr_t dst, dma_addr_t src) | |
654 | { | |
655 | u64 cmd; | |
656 | u32 c_mdata; | |
e7ae72aa | 657 | dma_addr_t resp_dma = req->tx.phys; |
743e1c8f AP |
658 | struct brcm_sba_command *cmdsp = cmds; |
659 | ||
660 | /* Type-B command to load data into buf0 */ | |
661 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
662 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
663 | cmd = sba_cmd_enc(cmd, msg_len, | |
664 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
665 | c_mdata = sba_cmd_load_c_mdata(0); | |
666 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
667 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
668 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, | |
669 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
670 | cmdsp->cmd = cmd; | |
671 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
672 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
673 | cmdsp->data = src + msg_offset; | |
674 | cmdsp->data_len = msg_len; | |
675 | cmdsp++; | |
676 | ||
677 | /* Type-A command to write buf0 */ | |
678 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
679 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
680 | cmd = sba_cmd_enc(cmd, msg_len, | |
681 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
682 | cmd = sba_cmd_enc(cmd, 0x1, | |
683 | SBA_RESP_SHIFT, SBA_RESP_MASK); | |
684 | c_mdata = sba_cmd_write_c_mdata(0); | |
685 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
686 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
687 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, | |
688 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
689 | cmdsp->cmd = cmd; | |
690 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
691 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
692 | if (req->sba->hw_resp_size) { | |
693 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | |
e7ae72aa | 694 | cmdsp->resp = resp_dma; |
743e1c8f AP |
695 | cmdsp->resp_len = req->sba->hw_resp_size; |
696 | } | |
697 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | |
698 | cmdsp->data = dst + msg_offset; | |
699 | cmdsp->data_len = msg_len; | |
700 | cmdsp++; | |
701 | ||
702 | /* Fill up the brcm_message */ | |
703 | msg->type = BRCM_MESSAGE_SBA; | |
704 | msg->sba.cmds = cmds; | |
705 | msg->sba.cmds_count = cmdsp - cmds; | |
706 | msg->ctx = req; | |
707 | msg->error = 0; | |
708 | } | |
709 | ||
710 | static struct sba_request * | |
711 | sba_prep_dma_memcpy_req(struct sba_device *sba, | |
712 | dma_addr_t off, dma_addr_t dst, dma_addr_t src, | |
713 | size_t len, unsigned long flags) | |
714 | { | |
715 | struct sba_request *req = NULL; | |
716 | ||
717 | /* Alloc new request */ | |
718 | req = sba_alloc_request(sba); | |
719 | if (!req) | |
720 | return NULL; | |
57a28508 AP |
721 | if (flags & DMA_PREP_FENCE) |
722 | req->flags |= SBA_REQUEST_FENCE; | |
743e1c8f AP |
723 | |
724 | /* Fill up the request message */ | |
725 | sba_fillup_memcpy_msg(req, req->cmds, &req->msg, | |
726 | off, len, dst, src); | |
727 | ||
728 | /* Init async_tx descriptor */ | |
729 | req->tx.flags = flags; | |
730 | req->tx.cookie = -EBUSY; | |
731 | ||
732 | return req; | |
733 | } | |
734 | ||
735 | static struct dma_async_tx_descriptor * | |
736 | sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, | |
737 | size_t len, unsigned long flags) | |
738 | { | |
739 | size_t req_len; | |
740 | dma_addr_t off = 0; | |
741 | struct sba_device *sba = to_sba_device(dchan); | |
742 | struct sba_request *first = NULL, *req; | |
743 | ||
744 | /* Create chained requests where each request is up to hw_buf_size */ | |
745 | while (len) { | |
746 | req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; | |
747 | ||
748 | req = sba_prep_dma_memcpy_req(sba, off, dst, src, | |
749 | req_len, flags); | |
750 | if (!req) { | |
751 | if (first) | |
752 | sba_free_chained_requests(first); | |
753 | return NULL; | |
754 | } | |
755 | ||
756 | if (first) | |
757 | sba_chain_request(first, req); | |
758 | else | |
759 | first = req; | |
760 | ||
761 | off += req_len; | |
762 | len -= req_len; | |
763 | } | |
764 | ||
765 | return (first) ? &first->tx : NULL; | |
766 | } | |
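/*
 * A minimal sketch of how a dmaengine client would drive this memcpy
 * path (standard dmaengine API; channel request, buffer mapping, and
 * error handling omitted):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 */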
767 | ||
768 | static void sba_fillup_xor_msg(struct sba_request *req, | |
769 | struct brcm_sba_command *cmds, | |
770 | struct brcm_message *msg, | |
771 | dma_addr_t msg_offset, size_t msg_len, | |
772 | dma_addr_t dst, dma_addr_t *src, u32 src_cnt) | |
773 | { | |
774 | u64 cmd; | |
775 | u32 c_mdata; | |
776 | unsigned int i; | |
e7ae72aa | 777 | dma_addr_t resp_dma = req->tx.phys; |
743e1c8f AP |
778 | struct brcm_sba_command *cmdsp = cmds; |
779 | ||
780 | /* Type-B command to load data into buf0 */ | |
781 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
782 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
783 | cmd = sba_cmd_enc(cmd, msg_len, | |
784 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
785 | c_mdata = sba_cmd_load_c_mdata(0); | |
786 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
787 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
788 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, | |
789 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
790 | cmdsp->cmd = cmd; | |
791 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
792 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
793 | cmdsp->data = src[0] + msg_offset; | |
794 | cmdsp->data_len = msg_len; | |
795 | cmdsp++; | |
796 | ||
797 | /* Type-B commands to xor data with buf0 and put it back in buf0 */ | |
798 | for (i = 1; i < src_cnt; i++) { | |
799 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
800 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
801 | cmd = sba_cmd_enc(cmd, msg_len, | |
802 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
803 | c_mdata = sba_cmd_xor_c_mdata(0, 0); | |
804 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
805 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
806 | cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, | |
807 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
808 | cmdsp->cmd = cmd; | |
809 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
810 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
811 | cmdsp->data = src[i] + msg_offset; | |
812 | cmdsp->data_len = msg_len; | |
813 | cmdsp++; | |
814 | } | |
815 | ||
816 | /* Type-A command to write buf0 */ | |
817 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
818 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
819 | cmd = sba_cmd_enc(cmd, msg_len, | |
820 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
821 | cmd = sba_cmd_enc(cmd, 0x1, | |
822 | SBA_RESP_SHIFT, SBA_RESP_MASK); | |
823 | c_mdata = sba_cmd_write_c_mdata(0); | |
824 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
825 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
826 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, | |
827 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
828 | cmdsp->cmd = cmd; | |
829 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
830 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
831 | if (req->sba->hw_resp_size) { | |
832 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | |
e7ae72aa | 833 | cmdsp->resp = resp_dma; |
743e1c8f AP |
834 | cmdsp->resp_len = req->sba->hw_resp_size; |
835 | } | |
836 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | |
837 | cmdsp->data = dst + msg_offset; | |
838 | cmdsp->data_len = msg_len; | |
839 | cmdsp++; | |
840 | ||
841 | /* Fill up the brcm_message */ | |
842 | msg->type = BRCM_MESSAGE_SBA; | |
843 | msg->sba.cmds = cmds; | |
844 | msg->sba.cmds_count = cmdsp - cmds; | |
845 | msg->ctx = req; | |
846 | msg->error = 0; | |
847 | } | |
848 | ||
dd2bceb0 | 849 | static struct sba_request * |
743e1c8f AP |
850 | sba_prep_dma_xor_req(struct sba_device *sba, |
851 | dma_addr_t off, dma_addr_t dst, dma_addr_t *src, | |
852 | u32 src_cnt, size_t len, unsigned long flags) | |
853 | { | |
854 | struct sba_request *req = NULL; | |
855 | ||
856 | /* Alloc new request */ | |
857 | req = sba_alloc_request(sba); | |
858 | if (!req) | |
859 | return NULL; | |
57a28508 AP |
860 | if (flags & DMA_PREP_FENCE) |
861 | req->flags |= SBA_REQUEST_FENCE; | |
743e1c8f AP |
862 | |
863 | /* Fill up the request message */ | |
864 | sba_fillup_xor_msg(req, req->cmds, &req->msg, | |
865 | off, len, dst, src, src_cnt); | |
866 | ||
867 | /* Init async_tx descriptor */ | |
868 | req->tx.flags = flags; | |
869 | req->tx.cookie = -EBUSY; | |
870 | ||
871 | return req; | |
872 | } | |
873 | ||
874 | static struct dma_async_tx_descriptor * | |
875 | sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src, | |
876 | u32 src_cnt, size_t len, unsigned long flags) | |
877 | { | |
878 | size_t req_len; | |
879 | dma_addr_t off = 0; | |
880 | struct sba_device *sba = to_sba_device(dchan); | |
881 | struct sba_request *first = NULL, *req; | |
882 | ||
883 | /* Sanity checks */ | |
884 | if (unlikely(src_cnt > sba->max_xor_srcs)) | |
885 | return NULL; | |
886 | ||
887 | /* Create chained requests where each request is up to hw_buf_size */ | |
888 | while (len) { | |
889 | req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; | |
890 | ||
891 | req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt, | |
892 | req_len, flags); | |
893 | if (!req) { | |
894 | if (first) | |
895 | sba_free_chained_requests(first); | |
896 | return NULL; | |
897 | } | |
898 | ||
899 | if (first) | |
900 | sba_chain_request(first, req); | |
901 | else | |
902 | first = req; | |
903 | ||
904 | off += req_len; | |
905 | len -= req_len; | |
906 | } | |
907 | ||
908 | return (first) ? &first->tx : NULL; | |
909 | } | |
910 | ||
911 | static void sba_fillup_pq_msg(struct sba_request *req, | |
912 | bool pq_continue, | |
913 | struct brcm_sba_command *cmds, | |
914 | struct brcm_message *msg, | |
915 | dma_addr_t msg_offset, size_t msg_len, | |
916 | dma_addr_t *dst_p, dma_addr_t *dst_q, | |
917 | const u8 *scf, dma_addr_t *src, u32 src_cnt) | |
918 | { | |
919 | u64 cmd; | |
920 | u32 c_mdata; | |
921 | unsigned int i; | |
e7ae72aa | 922 | dma_addr_t resp_dma = req->tx.phys; |
743e1c8f AP |
923 | struct brcm_sba_command *cmdsp = cmds; |
924 | ||
925 | if (pq_continue) { | |
926 | /* Type-B command to load old P into buf0 */ | |
927 | if (dst_p) { | |
928 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
929 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
930 | cmd = sba_cmd_enc(cmd, msg_len, | |
931 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
932 | c_mdata = sba_cmd_load_c_mdata(0); | |
933 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
934 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
935 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, | |
936 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
937 | cmdsp->cmd = cmd; | |
938 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
939 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
940 | cmdsp->data = *dst_p + msg_offset; | |
941 | cmdsp->data_len = msg_len; | |
942 | cmdsp++; | |
943 | } | |
944 | ||
945 | /* Type-B command to load old Q into buf1 */ | |
946 | if (dst_q) { | |
947 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
948 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
949 | cmd = sba_cmd_enc(cmd, msg_len, | |
950 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
951 | c_mdata = sba_cmd_load_c_mdata(1); | |
952 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
953 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
954 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, | |
955 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
956 | cmdsp->cmd = cmd; | |
957 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
958 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
959 | cmdsp->data = *dst_q + msg_offset; | |
960 | cmdsp->data_len = msg_len; | |
961 | cmdsp++; | |
962 | } | |
963 | } else { | |
964 | /* Type-A command to zero all buffers */ | |
965 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
966 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
967 | cmd = sba_cmd_enc(cmd, msg_len, | |
968 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
969 | cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS, | |
970 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
971 | cmdsp->cmd = cmd; | |
972 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
973 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
974 | cmdsp++; | |
975 | } | |
976 | ||
977 | /* Type-B commands to generate P into buf0 and Q into buf1 */ | |
978 | for (i = 0; i < src_cnt; i++) { | |
979 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
980 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
981 | cmd = sba_cmd_enc(cmd, msg_len, | |
982 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
983 | c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0); | |
984 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
985 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
986 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), | |
987 | SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); | |
988 | cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR, | |
989 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
990 | cmdsp->cmd = cmd; | |
991 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
992 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
993 | cmdsp->data = src[i] + msg_offset; | |
994 | cmdsp->data_len = msg_len; | |
995 | cmdsp++; | |
996 | } | |
997 | ||
998 | /* Type-A command to write buf0 */ | |
999 | if (dst_p) { | |
1000 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
1001 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1002 | cmd = sba_cmd_enc(cmd, msg_len, | |
1003 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1004 | cmd = sba_cmd_enc(cmd, 0x1, | |
1005 | SBA_RESP_SHIFT, SBA_RESP_MASK); | |
1006 | c_mdata = sba_cmd_write_c_mdata(0); | |
1007 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1008 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1009 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, | |
1010 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1011 | cmdsp->cmd = cmd; | |
1012 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1013 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
1014 | if (req->sba->hw_resp_size) { | |
1015 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | |
e7ae72aa | 1016 | cmdsp->resp = resp_dma; |
743e1c8f AP |
1017 | cmdsp->resp_len = req->sba->hw_resp_size; |
1018 | } | |
1019 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | |
1020 | cmdsp->data = *dst_p + msg_offset; | |
1021 | cmdsp->data_len = msg_len; | |
1022 | cmdsp++; | |
1023 | } | |
1024 | ||
1025 | /* Type-A command to write buf1 */ | |
1026 | if (dst_q) { | |
1027 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
1028 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1029 | cmd = sba_cmd_enc(cmd, msg_len, | |
1030 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1031 | cmd = sba_cmd_enc(cmd, 0x1, | |
1032 | SBA_RESP_SHIFT, SBA_RESP_MASK); | |
1033 | c_mdata = sba_cmd_write_c_mdata(1); | |
1034 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1035 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1036 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, | |
1037 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1038 | cmdsp->cmd = cmd; | |
1039 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1040 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
1041 | if (req->sba->hw_resp_size) { | |
1042 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | |
e7ae72aa | 1043 | cmdsp->resp = resp_dma; |
743e1c8f AP |
1044 | cmdsp->resp_len = req->sba->hw_resp_size; |
1045 | } | |
1046 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | |
1047 | cmdsp->data = *dst_q + msg_offset; | |
1048 | cmdsp->data_len = msg_len; | |
1049 | cmdsp++; | |
1050 | } | |
1051 | ||
1052 | /* Fill up the brcm_message */ | |
1053 | msg->type = BRCM_MESSAGE_SBA; | |
1054 | msg->sba.cmds = cmds; | |
1055 | msg->sba.cmds_count = cmdsp - cmds; | |
1056 | msg->ctx = req; | |
1057 | msg->error = 0; | |
1058 | } | |
1059 | ||
dd2bceb0 | 1060 | static struct sba_request * |
743e1c8f AP |
1061 | sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off, |
1062 | dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src, | |
1063 | u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) | |
1064 | { | |
1065 | struct sba_request *req = NULL; | |
1066 | ||
1067 | /* Alloc new request */ | |
1068 | req = sba_alloc_request(sba); | |
1069 | if (!req) | |
1070 | return NULL; | |
57a28508 AP |
1071 | if (flags & DMA_PREP_FENCE) |
1072 | req->flags |= SBA_REQUEST_FENCE; | |
743e1c8f AP |
1073 | |
1074 | /* Fill up the request messages */ | |
1075 | sba_fillup_pq_msg(req, dmaf_continue(flags), | |
1076 | req->cmds, &req->msg, | |
1077 | off, len, dst_p, dst_q, scf, src, src_cnt); | |
1078 | ||
1079 | /* Init async_tx descriptor */ | |
1080 | req->tx.flags = flags; | |
1081 | req->tx.cookie = -EBUSY; | |
1082 | ||
1083 | return req; | |
1084 | } | |
1085 | ||
1086 | static void sba_fillup_pq_single_msg(struct sba_request *req, | |
1087 | bool pq_continue, | |
1088 | struct brcm_sba_command *cmds, | |
1089 | struct brcm_message *msg, | |
1090 | dma_addr_t msg_offset, size_t msg_len, | |
1091 | dma_addr_t *dst_p, dma_addr_t *dst_q, | |
1092 | dma_addr_t src, u8 scf) | |
1093 | { | |
1094 | u64 cmd; | |
1095 | u32 c_mdata; | |
1096 | u8 pos, dpos = raid6_gflog[scf]; | |
e7ae72aa | 1097 | dma_addr_t resp_dma = req->tx.phys; |
743e1c8f AP |
1098 | struct brcm_sba_command *cmdsp = cmds; |
1099 | ||
1100 | if (!dst_p) | |
1101 | goto skip_p; | |
1102 | ||
1103 | if (pq_continue) { | |
1104 | /* Type-B command to load old P into buf0 */ | |
1105 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
1106 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1107 | cmd = sba_cmd_enc(cmd, msg_len, | |
1108 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1109 | c_mdata = sba_cmd_load_c_mdata(0); | |
1110 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1111 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1112 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, | |
1113 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1114 | cmdsp->cmd = cmd; | |
1115 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1116 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
1117 | cmdsp->data = *dst_p + msg_offset; | |
1118 | cmdsp->data_len = msg_len; | |
1119 | cmdsp++; | |
1120 | ||
1121 | /* | |
1122 | * Type-B command to XOR data with buf0 and put it | |
1123 | * back in buf0 | |
1124 | */ | |
1125 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
1126 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1127 | cmd = sba_cmd_enc(cmd, msg_len, | |
1128 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1129 | c_mdata = sba_cmd_xor_c_mdata(0, 0); | |
1130 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1131 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1132 | cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, | |
1133 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1134 | cmdsp->cmd = cmd; | |
1135 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1136 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
1137 | cmdsp->data = src + msg_offset; | |
1138 | cmdsp->data_len = msg_len; | |
1139 | cmdsp++; | |
1140 | } else { | |
1141 | /* Type-B command to load data into buf0 */ | |
1142 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
1143 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1144 | cmd = sba_cmd_enc(cmd, msg_len, | |
1145 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1146 | c_mdata = sba_cmd_load_c_mdata(0); | |
1147 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1148 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1149 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, | |
1150 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1151 | cmdsp->cmd = cmd; | |
1152 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1153 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
1154 | cmdsp->data = src + msg_offset; | |
1155 | cmdsp->data_len = msg_len; | |
1156 | cmdsp++; | |
1157 | } | |
1158 | ||
1159 | /* Type-A command to write buf0 */ | |
1160 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
1161 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1162 | cmd = sba_cmd_enc(cmd, msg_len, | |
1163 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1164 | cmd = sba_cmd_enc(cmd, 0x1, | |
1165 | SBA_RESP_SHIFT, SBA_RESP_MASK); | |
1166 | c_mdata = sba_cmd_write_c_mdata(0); | |
1167 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1168 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1169 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, | |
1170 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1171 | cmdsp->cmd = cmd; | |
1172 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1173 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
1174 | if (req->sba->hw_resp_size) { | |
1175 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | |
e7ae72aa | 1176 | cmdsp->resp = resp_dma; |
743e1c8f AP |
1177 | cmdsp->resp_len = req->sba->hw_resp_size; |
1178 | } | |
1179 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | |
1180 | cmdsp->data = *dst_p + msg_offset; | |
1181 | cmdsp->data_len = msg_len; | |
1182 | cmdsp++; | |
1183 | ||
1184 | skip_p: | |
1185 | if (!dst_q) | |
1186 | goto skip_q; | |
1187 | ||
1188 | /* Type-A command to zero all buffers */ | |
1189 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
1190 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1191 | cmd = sba_cmd_enc(cmd, msg_len, | |
1192 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1193 | cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS, | |
1194 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1195 | cmdsp->cmd = cmd; | |
1196 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1197 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
1198 | cmdsp++; | |
1199 | ||
1200 | if (dpos == 255) | |
1201 | goto skip_q_computation; | |
1202 | pos = (dpos < req->sba->max_pq_coefs) ? | |
1203 | dpos : (req->sba->max_pq_coefs - 1); | |
1204 | ||
1205 | /* | |
1206 | * Type-B command to generate initial Q from data | |
1207 | * and store output into buf0 | |
1208 | */ | |
1209 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
1210 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1211 | cmd = sba_cmd_enc(cmd, msg_len, | |
1212 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1213 | c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0); | |
1214 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1215 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1216 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), | |
1217 | SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); | |
1218 | cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, | |
1219 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1220 | cmdsp->cmd = cmd; | |
1221 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1222 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
1223 | cmdsp->data = src + msg_offset; | |
1224 | cmdsp->data_len = msg_len; | |
1225 | cmdsp++; | |
1226 | ||
1227 | dpos -= pos; | |
1228 | ||
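/*
 * Each GALOIS pass can multiply by at most g^(max_pq_coefs - 1),
 * so the remaining coefficient g^dpos is applied as a product of
 * smaller factors, g^dpos = g^pos1 * g^pos2 * ..., until dpos is
 * exhausted.
 */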
1229 | /* Multiple Type-A commands to generate the final Q */ | |
1230 | while (dpos) { | |
1231 | pos = (dpos < req->sba->max_pq_coefs) ? | |
1232 | dpos : (req->sba->max_pq_coefs - 1); | |
1233 | ||
1234 | /* | |
1235 | * Type-A command to generate Q from buf0 and | |
1236 | * buf1, storing the result in buf0 | |
1237 | */ | |
1238 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
1239 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1240 | cmd = sba_cmd_enc(cmd, msg_len, | |
1241 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1242 | c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1); | |
1243 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1244 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1245 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), | |
1246 | SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); | |
1247 | cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, | |
1248 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1249 | cmdsp->cmd = cmd; | |
1250 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1251 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
1252 | cmdsp++; | |
1253 | ||
1254 | dpos -= pos; | |
1255 | } | |
1256 | ||
1257 | skip_q_computation: | |
1258 | if (pq_continue) { | |
1259 | /* | |
1260 | * Type-B command to XOR previous output with | |
1261 | * buf0 and write it into buf0 | |
1262 | */ | |
1263 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, | |
1264 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1265 | cmd = sba_cmd_enc(cmd, msg_len, | |
1266 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1267 | c_mdata = sba_cmd_xor_c_mdata(0, 0); | |
1268 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1269 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1270 | cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, | |
1271 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1272 | cmdsp->cmd = cmd; | |
1273 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1274 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; | |
1275 | cmdsp->data = *dst_q + msg_offset; | |
1276 | cmdsp->data_len = msg_len; | |
1277 | cmdsp++; | |
1278 | } | |
1279 | ||
1280 | /* Type-A command to write buf0 */ | |
1281 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, | |
1282 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); | |
1283 | cmd = sba_cmd_enc(cmd, msg_len, | |
1284 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); | |
1285 | cmd = sba_cmd_enc(cmd, 0x1, | |
1286 | SBA_RESP_SHIFT, SBA_RESP_MASK); | |
1287 | c_mdata = sba_cmd_write_c_mdata(0); | |
1288 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), | |
1289 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); | |
1290 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, | |
1291 | SBA_CMD_SHIFT, SBA_CMD_MASK); | |
1292 | cmdsp->cmd = cmd; | |
1293 | *cmdsp->cmd_dma = cpu_to_le64(cmd); | |
1294 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; | |
1295 | if (req->sba->hw_resp_size) { | |
1296 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; | |
e7ae72aa | 1297 | cmdsp->resp = resp_dma; |
743e1c8f AP |
1298 | cmdsp->resp_len = req->sba->hw_resp_size; |
1299 | } | |
1300 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; | |
1301 | cmdsp->data = *dst_q + msg_offset; | |
1302 | cmdsp->data_len = msg_len; | |
1303 | cmdsp++; | |
1304 | ||
1305 | skip_q: | |
1306 | /* Fill up the brcm_message */ | |
1307 | msg->type = BRCM_MESSAGE_SBA; | |
1308 | msg->sba.cmds = cmds; | |
1309 | msg->sba.cmds_count = cmdsp - cmds; | |
1310 | msg->ctx = req; | |
1311 | msg->error = 0; | |
1312 | } | |
1313 | ||
dd2bceb0 | 1314 | static struct sba_request * |
743e1c8f AP |
1315 | sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off, |
1316 | dma_addr_t *dst_p, dma_addr_t *dst_q, | |
1317 | dma_addr_t src, u8 scf, size_t len, | |
1318 | unsigned long flags) | |
1319 | { | |
1320 | struct sba_request *req = NULL; | |
1321 | ||
1322 | /* Alloc new request */ | |
1323 | req = sba_alloc_request(sba); | |
1324 | if (!req) | |
1325 | return NULL; | |
57a28508 AP |
1326 | if (flags & DMA_PREP_FENCE) |
1327 | req->flags |= SBA_REQUEST_FENCE; | |
743e1c8f AP |
1328 | |
1329 | /* Fill up the request messages */ | |
1330 | sba_fillup_pq_single_msg(req, dmaf_continue(flags), | |
1331 | req->cmds, &req->msg, off, len, | |
1332 | dst_p, dst_q, src, scf); | |
1333 | ||
1334 | /* Init async_tx descriptor */ | |
1335 | req->tx.flags = flags; | |
1336 | req->tx.cookie = -EBUSY; | |
1337 | ||
1338 | return req; | |
1339 | } | |
1340 | ||
1341 | static struct dma_async_tx_descriptor * | |
1342 | sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src, | |
1343 | u32 src_cnt, const u8 *scf, size_t len, unsigned long flags) | |
1344 | { | |
1345 | u32 i, dst_q_index; | |
1346 | size_t req_len; | |
1347 | bool slow = false; | |
1348 | dma_addr_t off = 0; | |
1349 | dma_addr_t *dst_p = NULL, *dst_q = NULL; | |
1350 | struct sba_device *sba = to_sba_device(dchan); | |
1351 | struct sba_request *first = NULL, *req; | |
1352 | ||
1353 | /* Sanity checks */ | |
1354 | if (unlikely(src_cnt > sba->max_pq_srcs)) | |
1355 | return NULL; | |
1356 | for (i = 0; i < src_cnt; i++) | |
1357 | if (sba->max_pq_coefs <= raid6_gflog[scf[i]]) | |
1358 | slow = true; | |
1359 | ||
1360 | /* Figure-out P and Q destination addresses */ | |
1361 | if (!(flags & DMA_PREP_PQ_DISABLE_P)) | |
1362 | dst_p = &dst[0]; | |
1363 | if (!(flags & DMA_PREP_PQ_DISABLE_Q)) | |
1364 | dst_q = &dst[1]; | |
1365 | ||
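/*
 * If any coefficient's log exceeded max_pq_coefs above, the hardware
 * cannot apply that coefficient in a single GALOIS_XOR pass, so the
 * "slow" path below issues one single-source request per source
 * (chained with fences) instead of one multi-source request.
 */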
1366 | /* Create chained requests where each request is up to hw_buf_size */ | |
1367 | while (len) { | |
1368 | req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size; | |
1369 | ||
1370 | if (slow) { | |
1371 | dst_q_index = src_cnt; | |
1372 | ||
1373 | if (dst_q) { | |
1374 | for (i = 0; i < src_cnt; i++) { | |
1375 | if (*dst_q == src[i]) { | |
1376 | dst_q_index = i; | |
1377 | break; | |
1378 | } | |
1379 | } | |
1380 | } | |
1381 | ||
1382 | if (dst_q_index < src_cnt) { | |
1383 | i = dst_q_index; | |
1384 | req = sba_prep_dma_pq_single_req(sba, | |
1385 | off, dst_p, dst_q, src[i], scf[i], | |
1386 | req_len, flags | DMA_PREP_FENCE); | |
1387 | if (!req) | |
1388 | goto fail; | |
1389 | ||
1390 | if (first) | |
1391 | sba_chain_request(first, req); | |
1392 | else | |
1393 | first = req; | |
1394 | ||
1395 | flags |= DMA_PREP_CONTINUE; | |
1396 | } | |
1397 | ||
1398 | for (i = 0; i < src_cnt; i++) { | |
1399 | if (dst_q_index == i) | |
1400 | continue; | |
1401 | ||
1402 | req = sba_prep_dma_pq_single_req(sba, | |
1403 | off, dst_p, dst_q, src[i], scf[i], | |
1404 | req_len, flags | DMA_PREP_FENCE); | |
1405 | if (!req) | |
1406 | goto fail; | |
1407 | ||
1408 | if (first) | |
1409 | sba_chain_request(first, req); | |
1410 | else | |
1411 | first = req; | |
1412 | ||
1413 | flags |= DMA_PREP_CONTINUE; | |
1414 | } | |
1415 | } else { | |
1416 | req = sba_prep_dma_pq_req(sba, off, | |
1417 | dst_p, dst_q, src, src_cnt, | |
1418 | scf, req_len, flags); | |
1419 | if (!req) | |
1420 | goto fail; | |
1421 | ||
1422 | if (first) | |
1423 | sba_chain_request(first, req); | |
1424 | else | |
1425 | first = req; | |
1426 | } | |
1427 | ||
1428 | off += req_len; | |
1429 | len -= req_len; | |
1430 | } | |
1431 | ||
1432 | return (first) ? &first->tx : NULL; | |
1433 | ||
1434 | fail: | |
1435 | if (first) | |
1436 | sba_free_chained_requests(first); | |
1437 | return NULL; | |
1438 | } | |
1439 | ||
1440 | /* ====== Mailbox callbacks ====== */ | |
1441 | ||
743e1c8f AP |
1442 | static void sba_receive_message(struct mbox_client *cl, void *msg) |
1443 | { | |
743e1c8f | 1444 | struct brcm_message *m = msg; |
f8338514 | 1445 | struct sba_request *req = m->ctx; |
743e1c8f AP |
1446 | struct sba_device *sba = req->sba; |
1447 | ||
1448 | /* Log an error if the message carries one */ | |
1449 | if (m->error < 0) | |
1450 | dev_err(sba->dev, "%s got message with error %d", | |
1451 | dma_chan_name(&sba->dma_chan), m->error); | |
1452 | ||
f8338514 AP |
1453 | /* Process received request */ |
1454 | sba_process_received_request(sba, req); | |
743e1c8f AP |
1455 | } |
1456 | ||
8529a927 AP |
1457 | /* ====== Debugfs callbacks ====== */ |
1458 | ||
1459 | static int sba_debugfs_stats_show(struct seq_file *file, void *offset) | |
1460 | { | |
95d47fb7 | 1461 | struct sba_device *sba = dev_get_drvdata(file->private); |
8529a927 AP |
1462 | |
1463 | /* Write stats in file */ | |
1464 | sba_write_stats_in_seqfile(sba, file); | |
1465 | ||
1466 | return 0; | |
1467 | } | |
1468 | ||
743e1c8f AP |
1469 | /* ====== Platform driver routines ====== */ |
1470 | ||
1471 | static int sba_prealloc_channel_resources(struct sba_device *sba) | |
1472 | { | |
e7ae72aa | 1473 | int i, j, ret = 0; |
743e1c8f AP |
1474 | struct sba_request *req = NULL; |
1475 | ||
eb67744b | 1476 | sba->resp_base = dma_alloc_coherent(sba->mbox_dev, |
743e1c8f AP |
1477 | sba->max_resp_pool_size, |
1478 | &sba->resp_dma_base, GFP_KERNEL); | |
1479 | if (!sba->resp_base) | |
1480 | return -ENOMEM; | |
1481 | ||
eb67744b | 1482 | sba->cmds_base = dma_alloc_coherent(sba->mbox_dev, |
743e1c8f AP |
1483 | sba->max_cmds_pool_size, |
1484 | &sba->cmds_dma_base, GFP_KERNEL); | |
1485 | if (!sba->cmds_base) { | |
1486 | ret = -ENOMEM; | |
1487 | goto fail_free_resp_pool; | |
1488 | } | |
1489 | ||
1490 | spin_lock_init(&sba->reqs_lock); | |
1491 | sba->reqs_fence = false; | |
1492 | INIT_LIST_HEAD(&sba->reqs_alloc_list); | |
1493 | INIT_LIST_HEAD(&sba->reqs_pending_list); | |
1494 | INIT_LIST_HEAD(&sba->reqs_active_list); | |
743e1c8f AP |
1495 | INIT_LIST_HEAD(&sba->reqs_aborted_list); |
1496 | INIT_LIST_HEAD(&sba->reqs_free_list); | |
1497 | ||
	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   struct_size(req, cmds, sba->max_cmd_per_req),
				   GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;
		}
		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->flags = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		atomic_set(&req->next_pending_count, 0);
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}
		memset(&req->msg, 0, sizeof(req->msg));
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		async_tx_ack(&req->tx);
		req->tx.tx_submit = sba_tx_submit;
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}

	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}

static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set mailbox channel device as the base device of
	 * our dma_device because the actual memory accesses
	 * will be done by mailbox controller
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base and prep routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d\n", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}
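
/*
 * For illustration only: once registered, a dmaengine client can drive
 * this channel through the standard async API, roughly as sketched below
 * (dst_dma, src_dma, src_cnt, and len are hypothetical DMA-mapped
 * buffers/parameters; error handling omitted):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_XOR, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	tx = chan->device->device_prep_dma_xor(chan, dst_dma, src_dma,
 *					       src_cnt, len, 0);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);
 */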

static int sba_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of mailbox channels should be at least 1 */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We could support max_pq_srcs == max_pq_coefs, but
		 * we are limited by the number of SBA commands that
		 * fit in one message for the underlying ring manager HW.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
				  sba->max_cmd_per_req * sizeof(u64);
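	/*
	 * Worked example for SBA_VER_2: max_cmd_per_req = 12 + 3 = 15
	 * commands per request and max_xor_srcs = 14, so every request
	 * reserves 15 * 8 = 120 bytes in the command pool plus one
	 * 8-byte response slot.
	 */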

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = true;
	sba->client.tx_tout = 0;
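	/*
	 * Note: completions are signalled through sba_receive_message()
	 * above, so the client declares knows_txdone and never blocks
	 * or times out on transmit.
	 */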

	/* Request mailbox channel */
	sba->mchan = mbox_request_channel(&sba->client, 0);
	if (IS_ERR(sba->mchan)) {
		/* No channel was acquired, so do not take the fail
		 * path that would free an invalid channel pointer.
		 */
		return PTR_ERR(sba->mchan);
	}

	/* Find out the underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchan;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchan;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* Preallocate channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchan;

	/* Check availability of debugfs */
	if (!debugfs_initialized())
		goto skip_debugfs;

	/* Create debugfs root entry */
	sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);

	/* Create debugfs stats entry */
	debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
				    sba_debugfs_stats_show);
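	/* Both debugfs calls above are best-effort; errors are ignored */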

skip_debugfs:

	/* Register DMA device with Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s\n",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 dev_name(sba->mbox_dev));

	return 0;

fail_free_resources:
	debugfs_remove_recursive(sba->root);
	sba_freeup_channel_resources(sba);
fail_free_mchan:
	mbox_free_channel(sba->mchan);
	return ret;
}

static int sba_remove(struct platform_device *pdev)
{
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	debugfs_remove_recursive(sba->root);

	sba_freeup_channel_resources(sba);

	mbox_free_channel(sba->mchan);

	return 0;
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");