/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include "scif_map.h"

/*
 * struct scif_dma_comp_cb - SCIF DMA completion callback
 *
 * @dma_completion_func: DMA completion callback
 * @cb_cookie: DMA completion callback cookie
 * @temp_buf: Temporary buffer
 * @temp_buf_to_free: Temporary buffer to be freed
 * @is_cache: Is a kmem_cache allocated buffer
 * @dst_offset: Destination registration offset
 * @dst_window: Destination registration window
 * @len: Length of the temp buffer
 * @temp_phys: DMA address of the temp buffer
 * @sdev: The SCIF device
 * @header_padding: padding for cache line alignment
 */
struct scif_dma_comp_cb {
	void (*dma_completion_func)(void *cookie);
	void *cb_cookie;
	u8 *temp_buf;
	u8 *temp_buf_to_free;
	bool is_cache;
	s64 dst_offset;
	struct scif_window *dst_window;
	size_t len;
	dma_addr_t temp_phys;
	struct scif_dev *sdev;
	int header_padding;
};

/**
 * struct scif_copy_work - Work for DMA copy
 *
 * @src_offset: Starting source offset
 * @dst_offset: Starting destination offset
 * @src_window: Starting src registered window
 * @dst_window: Starting dst registered window
 * @loopback: true if this is a loopback DMA transfer
 * @len: Length of the transfer
 * @comp_cb: DMA copy completion callback
 * @remote_dev: The remote SCIF peer device
 * @fence_type: polling or interrupt based
 * @ordered: is this a tail byte ordered DMA transfer
 */
struct scif_copy_work {
	s64 src_offset;
	s64 dst_offset;
	struct scif_window *src_window;
	struct scif_window *dst_window;
	int loopback;
	size_t len;
	struct scif_dma_comp_cb *comp_cb;
	struct scif_dev *remote_dev;
	int fence_type;
	bool ordered;
};

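/* Fallback definition in case list_entry_next() is not provided elsewhere */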
#ifndef list_entry_next
#define list_entry_next(pos, member) \
	list_entry(pos->member.next, typeof(*pos), member)
#endif

/**
 * scif_reserve_dma_chan:
 * @ep: Endpoint Descriptor.
 *
 * This routine reserves a DMA channel for a particular
 * endpoint. All DMA transfers for an endpoint are always
 * programmed on the same DMA channel.
 */
int scif_reserve_dma_chan(struct scif_endpt *ep)
{
	int err = 0;
	struct scif_dev *scifdev;
	struct scif_hw_dev *sdev;
	struct dma_chan *chan;

	/* Loopback DMAs are not supported on the management node */
	if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
		return 0;
	if (scif_info.nodeid)
		scifdev = &scif_dev[0];
	else
		scifdev = ep->remote_dev;
	sdev = scifdev->sdev;
	if (!sdev->num_dma_ch)
		return -ENODEV;
	chan = sdev->dma_ch[scifdev->dma_ch_idx];
	scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
	mutex_lock(&ep->rma_info.rma_lock);
	ep->rma_info.dma_chan = chan;
	mutex_unlock(&ep->rma_info.rma_lock);
	return err;
}

#ifdef CONFIG_MMU_NOTIFIER
/**
 * __scif_rma_destroy_tcw:
 *
 * This routine destroys temporary cached windows in the given range.
 * The caller must hold the endpoint's tc_lock.
 */
static
void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
			    struct scif_endpt *ep,
			    u64 start, u64 len)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	u64 start_va, end_va;
	u64 end = start + len;

	if (end <= start)
		return;

	list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
		window = list_entry(item, struct scif_window, list);
		ep = (struct scif_endpt *)window->ep;
		if (!len)
			break;
		start_va = window->va_for_temp;
		end_va = start_va + (window->nr_pages << PAGE_SHIFT);
		if (start < start_va && end <= start_va)
			break;
		if (start >= end_va)
			continue;
		__scif_rma_destroy_tcw_helper(window);
	}
}

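/*
 * scif_rma_destroy_tcw:
 *
 * Grab the endpoint's tc_lock and destroy the temporary cached windows
 * overlapping [start, start + len) for this MMU notifier.
 */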
static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
{
	struct scif_endpt *ep = mmn->ep;

	spin_lock(&ep->rma_info.tc_lock);
	__scif_rma_destroy_tcw(mmn, ep, start, len);
	spin_unlock(&ep->rma_info.tc_lock);
}

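/*
 * Destroy the temporary cached windows for every MMU notifier registered
 * with this endpoint; tc_lock is taken and released per notifier.
 */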
static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
	struct list_head *item, *tmp;
	struct scif_mmu_notif *mmn;

	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
	}
}

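/*
 * Same as scif_rma_destroy_tcw_ep() except that tc_lock is held across
 * the entire walk of the notifier list.
 */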
static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
	struct list_head *item, *tmp;
	struct scif_mmu_notif *mmn;

	spin_lock(&ep->rma_info.tc_lock);
	list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		__scif_rma_destroy_tcw(mmn, ep, 0, ULONG_MAX);
	}
	spin_unlock(&ep->rma_info.tc_lock);
}

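/*
 * scif_rma_tc_can_cache:
 *
 * Returns false if cur_bytes by itself exceeds the temporary cache page
 * limit. If the endpoint's cached pages plus cur_bytes would cross the
 * limit, the existing cached windows are destroyed before returning true.
 */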
static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
	if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
		return false;
	if ((atomic_read(&ep->rma_info.tcw_total_pages)
	     + (cur_bytes >> PAGE_SHIFT)) >
	    scif_info.rma_tc_limit) {
		dev_info(scif_info.mdev.this_device,
			 "%s %d total=%d, current=%zu reached max\n",
			 __func__, __LINE__,
			 atomic_read(&ep->rma_info.tcw_total_pages),
			 (1 + (cur_bytes >> PAGE_SHIFT)));
		scif_rma_destroy_tcw_invalid();
		__scif_rma_destroy_tcw_ep(ep);
	}
	return true;
}

static void scif_mmu_notifier_release(struct mmu_notifier *mn,
				      struct mm_struct *mm)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
	schedule_work(&scif_info.misc_work);
}

static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					      struct mm_struct *mm,
					      unsigned long address)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
}

static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
						     struct mm_struct *mm,
						     unsigned long start,
						     unsigned long end)
{
	struct scif_mmu_notif *mmn;

	mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
	scif_rma_destroy_tcw(mmn, start, end - start);
}

static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
						   struct mm_struct *mm,
						   unsigned long start,
						   unsigned long end)
{
	/*
	 * Nothing to do here, everything needed was done in
	 * invalidate_range_start.
	 */
}

static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
	.release = scif_mmu_notifier_release,
	.clear_flush_young = NULL,
	.invalidate_page = scif_mmu_notifier_invalidate_page,
	.invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
	.invalidate_range_end = scif_mmu_notifier_invalidate_range_end};

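/* Unregister and free all MMU notifiers attached to this endpoint */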
static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
{
	struct scif_endpt_rma_info *rma = &ep->rma_info;
	struct scif_mmu_notif *mmn = NULL;
	struct list_head *item, *tmp;

	mutex_lock(&ep->rma_info.mmn_lock);
	list_for_each_safe(item, tmp, &rma->mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
		list_del(item);
		kfree(mmn);
	}
	mutex_unlock(&ep->rma_info.mmn_lock);
}

static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
				   struct mm_struct *mm, struct scif_endpt *ep)
{
	mmn->ep = ep;
	mmn->mm = mm;
	mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
	INIT_LIST_HEAD(&mmn->list);
	INIT_LIST_HEAD(&mmn->tc_reg_list);
}

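/* Look up the MMU notifier, if any, already registered for this mm */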
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
{
	struct scif_mmu_notif *mmn;
	struct list_head *item;

	list_for_each(item, &rma->mmn_list) {
		mmn = list_entry(item, struct scif_mmu_notif, list);
		if (mmn->mm == mm)
			return mmn;
	}
	return NULL;
}

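/*
 * Allocate and register an MMU notifier for the current mm and add it to
 * the endpoint's notifier list. Returns ERR_PTR(-ENOMEM) or
 * ERR_PTR(-EBUSY) on failure.
 */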
static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
	struct scif_mmu_notif *mmn
		= kzalloc(sizeof(*mmn), GFP_KERNEL);

	if (!mmn)
		return ERR_PTR(-ENOMEM);

	scif_init_mmu_notifier(mmn, current->mm, ep);
	if (mmu_notifier_register(&mmn->ep_mmu_notifier,
				  current->mm)) {
		kfree(mmn);
		return ERR_PTR(-EBUSY);
	}
	list_add(&mmn->list, &ep->rma_info.mmn_list);
	return mmn;
}

/*
 * Called from the misc thread to destroy temporary cached windows and
 * unregister the MMU notifier for the SCIF endpoint.
 */
void scif_mmu_notif_handler(struct work_struct *work)
{
	struct list_head *pos, *tmpq;
	struct scif_endpt *ep;
restart:
	scif_rma_destroy_tcw_invalid();
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
		ep = list_entry(pos, struct scif_endpt, mmu_list);
		list_del(&ep->mmu_list);
		spin_unlock(&scif_info.rmalock);
		scif_rma_destroy_tcw_ep(ep);
		scif_ep_unregister_mmu_notifier(ep);
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}

static bool scif_is_set_reg_cache(int flags)
{
	return !!(flags & SCIF_RMA_USECACHE);
}
#else
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm,
		       struct scif_endpt_rma_info *rma)
{
	return NULL;
}

static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
	return NULL;
}

void scif_mmu_notif_handler(struct work_struct *work)
{
}

static bool scif_is_set_reg_cache(int flags)
{
	return false;
}

static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
	return false;
}
#endif

/**
 * scif_register_temp:
 * @epd: End Point Descriptor.
 * @addr: virtual address to/from which to copy
 * @len: length of range to copy
 * @prot: read/write protections for the pinned pages
 * @out_offset: computed offset returned by reference.
 * @out_window: allocated registered window returned by reference.
 *
 * Create a temporary registered window. The peer will not know about this
 * window. This API is used for the scif_vreadfrom()/scif_vwriteto() APIs.
 */
static int
scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
		   off_t *out_offset, struct scif_window **out_window)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err;
	scif_pinned_pages_t pinned_pages;
	size_t aligned_len;

	aligned_len = ALIGN(len, PAGE_SIZE);

	err = __scif_pin_pages((void *)(addr & PAGE_MASK),
			       aligned_len, &prot, 0, &pinned_pages);
	if (err)
		return err;

	pinned_pages->prot = prot;

	/* Compute the offset for this registration */
	err = scif_get_window_offset(ep, 0, 0,
				     aligned_len >> PAGE_SHIFT,
				     (s64 *)out_offset);
	if (err)
		goto error_unpin;

	/* Allocate and prepare self registration window */
	*out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
					 *out_offset, true);
	if (!*out_window) {
		scif_free_window_offset(ep, NULL, *out_offset);
		err = -ENOMEM;
		goto error_unpin;
	}

	(*out_window)->pinned_pages = pinned_pages;
	(*out_window)->nr_pages = pinned_pages->nr_pages;
	(*out_window)->prot = pinned_pages->prot;

	(*out_window)->va_for_temp = addr & PAGE_MASK;
	err = scif_map_window(ep->remote_dev, *out_window);
	if (err) {
		/* Something went wrong! Rollback */
		scif_destroy_window(ep, *out_window);
		*out_window = NULL;
	} else {
		*out_offset |= (addr - (*out_window)->va_for_temp);
	}
	return err;
error_unpin:
	if (err)
		dev_err(&ep->remote_dev->sdev->dev,
			"%s %d err %d\n", __func__, __LINE__, err);
	scif_unpin_pages(pinned_pages);
	return err;
}

#define SCIF_DMA_TO (3 * HZ)

/*
 * scif_sync_dma - Program a DMA without an interrupt descriptor
 *
 * @sdev - The SCIF hardware device instance used for DMA registration.
 * @chan - DMA channel to be used.
 * @sync_wait - Wait for DMA to complete?
 *
 * Return 0 on success and -errno on error.
 */
static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
			 bool sync_wait)
{
	int err = 0;
	struct dma_async_tx_descriptor *tx = NULL;
	enum dma_ctrl_flags flags = DMA_PREP_FENCE;
	dma_cookie_t cookie;
	struct dma_device *ddev;

	if (!chan) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	ddev = chan->device;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	if (!sync_wait) {
		dma_async_issue_pending(chan);
	} else {
		if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
			err = 0;
		} else {
			err = -EIO;
			dev_err(&sdev->dev, "%s %d err %d\n",
				__func__, __LINE__, err);
		}
	}
release:
	return err;
}

static void scif_dma_callback(void *arg)
{
	struct completion *done = (struct completion *)arg;

	complete(done);
}

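/*
 * Fence types: SCIF_DMA_POLL drains the DMA channel by polling for
 * completion, SCIF_DMA_INTR uses an interrupt based blocking wait.
 */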
#define SCIF_DMA_SYNC_WAIT true
#define SCIF_DMA_POLL BIT(0)
#define SCIF_DMA_INTR BIT(1)

/*
 * scif_async_dma - Program a DMA with an interrupt descriptor
 *
 * @sdev - The SCIF hardware device instance used for DMA registration.
 * @chan - DMA channel to be used.
 * Return 0 on success and -errno on error.
 */
static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	int err = 0;
	struct dma_device *ddev;
	struct dma_async_tx_descriptor *tx = NULL;
	enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
	DECLARE_COMPLETION_ONSTACK(done_wait);
	dma_cookie_t cookie;
	enum dma_status status;

	if (!chan) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	ddev = chan->device;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	reinit_completion(&done_wait);
	tx->callback = scif_dma_callback;
	tx->callback_param = &done_wait;
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		err = -ENOMEM;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	dma_async_issue_pending(chan);

	err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
	if (!err) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
	err = 0;
	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
	if (status != DMA_COMPLETE) {
		err = -EIO;
		dev_err(&sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto release;
	}
release:
	return err;
}

/*
 * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
 * DMA channel via polling.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	if (!chan)
		return -EINVAL;
	return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
}

/*
 * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
 * DMA channel via interrupt based blocking wait.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
	if (!chan)
		return -EINVAL;
	return scif_async_dma(sdev, chan);
}

/**
 * scif_rma_destroy_windows:
 *
 * This routine destroys all windows queued for cleanup
 */
void scif_rma_destroy_windows(void)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep;
	struct dma_chan *chan;

	might_sleep();
restart:
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(item, tmp, &scif_info.rma) {
		window = list_entry(item, struct scif_window,
				    list);
		ep = (struct scif_endpt *)window->ep;
		chan = ep->rma_info.dma_chan;

		list_del_init(&window->list);
		spin_unlock(&scif_info.rmalock);
		if (!chan || !scifdev_alive(ep) ||
		    !scif_drain_dma_intr(ep->remote_dev->sdev,
					 ep->rma_info.dma_chan))
			/* Remove window from global list */
			window->unreg_state = OP_COMPLETED;
		else
			dev_warn(&ep->remote_dev->sdev->dev,
				 "DMA engine hung?\n");
		if (window->unreg_state == OP_COMPLETED) {
			if (window->type == SCIF_WINDOW_SELF)
				scif_destroy_window(ep, window);
			else
				scif_destroy_remote_window(window);
			atomic_dec(&ep->rma_info.tw_refcount);
		}
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}

/**
 * scif_rma_destroy_tcw_invalid:
 *
 * This routine destroys temporary cached registered windows
 * which have been queued for cleanup.
 */
void scif_rma_destroy_tcw_invalid(void)
{
	struct list_head *item, *tmp;
	struct scif_window *window;
	struct scif_endpt *ep;
	struct dma_chan *chan;

	might_sleep();
restart:
	spin_lock(&scif_info.rmalock);
	list_for_each_safe(item, tmp, &scif_info.rma_tc) {
		window = list_entry(item, struct scif_window, list);
		ep = (struct scif_endpt *)window->ep;
		chan = ep->rma_info.dma_chan;
		list_del_init(&window->list);
		spin_unlock(&scif_info.rmalock);
		mutex_lock(&ep->rma_info.rma_lock);
		if (!chan || !scifdev_alive(ep) ||
		    !scif_drain_dma_intr(ep->remote_dev->sdev,
					 ep->rma_info.dma_chan)) {
			atomic_sub(window->nr_pages,
				   &ep->rma_info.tcw_total_pages);
			scif_destroy_window(ep, window);
			atomic_dec(&ep->rma_info.tcw_refcount);
		} else {
			dev_warn(&ep->remote_dev->sdev->dev,
				 "DMA engine hung?\n");
		}
		mutex_unlock(&ep->rma_info.rma_lock);
		goto restart;
	}
	spin_unlock(&scif_info.rmalock);
}

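/*
 * Return the kernel virtual address for an offset into a self registered
 * window backed by pinned pages; peer windows must instead be mapped via
 * ioremap_remote().
 */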
static inline
void *_get_local_va(off_t off, struct scif_window *window, size_t len)
{
	int page_nr = (off - window->offset) >> PAGE_SHIFT;
	off_t page_off = off & ~PAGE_MASK;
	void *va = NULL;

	if (window->type == SCIF_WINDOW_SELF) {
		struct page **pages = window->pinned_pages->pages;

		va = page_address(pages[page_nr]) + page_off;
	}
	return va;
}

static inline
void *ioremap_remote(off_t off, struct scif_window *window,
		     size_t len, struct scif_dev *dev,
		     struct scif_window_iter *iter)
{
	dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);

	/*
	 * If the DMA address is not card relative then we need the DMA
	 * addresses to be an offset into the BAR. The aperture base was
	 * already added so subtract it here since scif_ioremap is going
	 * to add it again.
	 */
	if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
	    dev->sdev->aper && !dev->sdev->card_rel_da)
		phys = phys - dev->sdev->aper->pa;
	return scif_ioremap(phys, len, dev);
}

static inline void
iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
{
	scif_iounmap(virt, size, work->remote_dev);
}

/*
 * Takes care of ordering issues caused by
 * 1. Hardware: Only in the case of cpu copy from mgmt node to card
 *    because of WC memory.
 * 2. Software: If memcpy reorders copy instructions for optimization.
 *    This could happen at both mgmt node and card.
 */
static inline void
scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
{
	if (!count)
		return;

	memcpy_toio((void __iomem __force *)dst, src, --count);
	/* Order the last byte with the previous stores */
	wmb();
	*(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
					   size_t count, bool ordered)
{
	if (ordered)
		scif_ordered_memcpy_toio(dst, src, count);
	else
		memcpy_toio((void __iomem __force *)dst, src, count);
}

static inline
void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
{
	if (!count)
		return;

	memcpy_fromio(dst, (void __iomem __force *)src, --count);
	/* Order the last byte with the previous loads */
	rmb();
	*(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
					     size_t count, bool ordered)
{
	if (ordered)
		scif_ordered_memcpy_fromio(dst, src, count);
	else
		memcpy_fromio(dst, (void __iomem __force *)src, count);
}

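/* Sentinel returned by scif_off_to_dma_addr() when no mapping is found */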
#define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)

/*
 * scif_off_to_dma_addr:
 * Obtain the dma_addr given the window and the offset.
 * @window: Registered window.
 * @off: Window offset.
 * @nr_bytes: Return the number of contiguous bytes till next DMA addr index.
 * @iter: Window iterator which caches the index and start offset of the
 * last contiguous chunk found.
 * nr_bytes gives the caller an estimate of the maximum possible DMA
 * transfer while the iterator provides faster lookups on the next call.
 */
dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
				size_t *nr_bytes, struct scif_window_iter *iter)
{
	int i, page_nr;
	s64 start, end;
	off_t page_off;

	if (window->nr_pages == window->nr_contig_chunks) {
		page_nr = (off - window->offset) >> PAGE_SHIFT;
		page_off = off & ~PAGE_MASK;

		if (nr_bytes)
			*nr_bytes = PAGE_SIZE - page_off;
		return window->dma_addr[page_nr] | page_off;
	}
	if (iter) {
		i = iter->index;
		start = iter->offset;
	} else {
		i = 0;
		start = window->offset;
	}
	for (; i < window->nr_contig_chunks; i++) {
		end = start + (window->num_pages[i] << PAGE_SHIFT);
		if (off >= start && off < end) {
			if (iter) {
				iter->index = i;
				iter->offset = start;
			}
			if (nr_bytes)
				*nr_bytes = end - off;
			return (window->dma_addr[i] + (off - start));
		}
		start += (window->num_pages[i] << PAGE_SHIFT);
	}
	dev_err(scif_info.mdev.this_device,
		"%s %d BUG. Addr not found? window %p off 0x%llx\n",
		__func__, __LINE__, window, off);
	return SCIF_RMA_ERROR_CODE;
}

/*
 * Copy between rma window and temporary buffer
 */
static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
				    u8 *temp, size_t rem_len, bool to_temp)
{
	void *window_virt;
	size_t loop_len;
	int offset_in_page;
	s64 end_offset;

	offset_in_page = offset & ~PAGE_MASK;
	loop_len = PAGE_SIZE - offset_in_page;

	if (rem_len < loop_len)
		loop_len = rem_len;

	window_virt = _get_local_va(offset, window, loop_len);
	if (!window_virt)
		return;
	if (to_temp)
		memcpy(temp, window_virt, loop_len);
	else
		memcpy(window_virt, temp, loop_len);

	offset += loop_len;
	temp += loop_len;
	rem_len -= loop_len;

	end_offset = window->offset +
		(window->nr_pages << PAGE_SHIFT);
	while (rem_len) {
		if (offset == end_offset) {
			window = list_entry_next(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		loop_len = min(PAGE_SIZE, rem_len);
		window_virt = _get_local_va(offset, window, loop_len);
		if (!window_virt)
			return;
		if (to_temp)
			memcpy(temp, window_virt, loop_len);
		else
			memcpy(window_virt, temp, loop_len);
		offset += loop_len;
		temp += loop_len;
		rem_len -= loop_len;
	}
}

/**
 * scif_rma_completion_cb:
 * @data: RMA cookie
 *
 * RMA interrupt completion callback.
 */
static void scif_rma_completion_cb(void *data)
{
	struct scif_dma_comp_cb *comp_cb = data;

	/* Free DMA Completion CB. */
	if (comp_cb->dst_window)
		scif_rma_local_cpu_copy(comp_cb->dst_offset,
					comp_cb->dst_window,
					comp_cb->temp_buf +
					comp_cb->header_padding,
					comp_cb->len, false);
	scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
			  SCIF_KMEM_UNALIGNED_BUF_SIZE);
	if (comp_cb->is_cache)
		kmem_cache_free(unaligned_cache,
				comp_cb->temp_buf_to_free);
	else
		kfree(comp_cb->temp_buf_to_free);
}

/* Copies between temporary buffer and offsets provided in work */
static int
scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
				 u8 *temp, struct dma_chan *chan,
				 bool src_local)
{
	struct scif_dma_comp_cb *comp_cb = work->comp_cb;
	dma_addr_t window_dma_addr, temp_dma_addr;
	dma_addr_t temp_phys = comp_cb->temp_phys;
	size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
	int offset_in_ca, ret = 0;
	s64 end_offset, offset;
	struct scif_window *window;
	void *window_virt_addr;
	size_t tail_len;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	if (src_local) {
		offset = work->dst_offset;
		window = work->dst_window;
	} else {
		offset = work->src_offset;
		window = work->src_window;
	}

	offset_in_ca = offset & (L1_CACHE_BYTES - 1);
	if (offset_in_ca) {
		loop_len = L1_CACHE_BYTES - offset_in_ca;
		loop_len = min(loop_len, remaining_len);
		window_virt_addr = ioremap_remote(offset, window,
						  loop_len,
						  work->remote_dev,
						  NULL);
		if (!window_virt_addr)
			return -ENOMEM;
		if (src_local)
			scif_unaligned_cpy_toio(window_virt_addr, temp,
						loop_len,
						work->ordered &&
						!(remaining_len - loop_len));
		else
			scif_unaligned_cpy_fromio(temp, window_virt_addr,
						  loop_len, work->ordered &&
						  !(remaining_len - loop_len));
		iounmap_remote(window_virt_addr, loop_len, work);

		offset += loop_len;
		temp += loop_len;
		temp_phys += loop_len;
		remaining_len -= loop_len;
	}

	offset_in_ca = offset & ~PAGE_MASK;
	end_offset = window->offset +
		(window->nr_pages << PAGE_SHIFT);

	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
	remaining_len -= tail_len;
	while (remaining_len) {
		if (offset == end_offset) {
			window = list_entry_next(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		if (scif_is_mgmt_node())
			temp_dma_addr = temp_phys;
		else
			/* Fix if we ever enable IOMMU on the card */
			temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
		window_dma_addr = scif_off_to_dma_addr(window, offset,
						       &nr_contig_bytes,
						       NULL);
		loop_len = min(nr_contig_bytes, remaining_len);
		if (src_local) {
			if (work->ordered && !tail_len &&
			    !(remaining_len - loop_len) &&
			    loop_len != L1_CACHE_BYTES) {
				/*
				 * Break up the last chunk of the transfer into
				 * two steps if there is no tail, to guarantee
				 * DMA ordering. DMA_PREP_FENCE inserts a
				 * status update descriptor in step 1 which
				 * acts as a double sided synchronization fence
				 * for the DMA engine to ensure that the last
				 * cache line in step 2 is updated last.
				 */
				/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len -
							    L1_CACHE_BYTES,
							    DMA_PREP_FENCE);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
				offset += (loop_len - L1_CACHE_BYTES);
				temp_dma_addr += (loop_len - L1_CACHE_BYTES);
				window_dma_addr += (loop_len - L1_CACHE_BYTES);
				remaining_len -= (loop_len - L1_CACHE_BYTES);
				loop_len = remaining_len;

				/* Step 2) DMA: L1_CACHE_BYTES */
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len, 0);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
			} else {
				tx =
				dev->device_prep_dma_memcpy(chan,
							    window_dma_addr,
							    temp_dma_addr,
							    loop_len, 0);
				if (!tx) {
					ret = -ENOMEM;
					goto err;
				}
				cookie = tx->tx_submit(tx);
				if (dma_submit_error(cookie)) {
					ret = -ENOMEM;
					goto err;
				}
				dma_async_issue_pending(chan);
			}
		} else {
			tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
							 window_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		}
		if (ret < 0)
			goto err;
		offset += loop_len;
		temp += loop_len;
		temp_phys += loop_len;
		remaining_len -= loop_len;
		offset_in_ca = 0;
	}
	if (tail_len) {
		if (offset == end_offset) {
			window = list_entry_next(window, list);
			end_offset = window->offset +
				(window->nr_pages << PAGE_SHIFT);
		}
		window_virt_addr = ioremap_remote(offset, window, tail_len,
						  work->remote_dev,
						  NULL);
		if (!window_virt_addr)
			return -ENOMEM;
		/*
		 * The CPU copy for the tail bytes must be initiated only
		 * after previous DMA transfers for this endpoint have
		 * completed to guarantee ordering.
		 */
		if (work->ordered) {
			struct scif_dev *rdev = work->remote_dev;

			ret = scif_drain_dma_intr(rdev->sdev, chan);
			if (ret)
				return ret;
		}
		if (src_local)
			scif_unaligned_cpy_toio(window_virt_addr, temp,
						tail_len, work->ordered);
		else
			scif_unaligned_cpy_fromio(temp, window_virt_addr,
						  tail_len, work->ordered);
		iounmap_remote(window_virt_addr, tail_len, work);
	}
	tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		return ret;
	}
	tx->callback = &scif_rma_completion_cb;
	tx->callback_param = comp_cb;
	cookie = tx->tx_submit(tx);

	if (dma_submit_error(cookie)) {
		ret = -ENOMEM;
		return ret;
	}
	dma_async_issue_pending(chan);
	return 0;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}

/*
 * _scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy. This version is used
 * when the DMA engine supports byte granularity copies.
 */
static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					   struct dma_chan *chan)
{
	dma_addr_t src_dma_addr, dst_dma_addr;
	size_t loop_len, remaining_len, src_contig_bytes = 0;
	size_t dst_contig_bytes = 0;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;
	s64 end_src_offset, end_dst_offset;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	remaining_len = work->len;

	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);
	end_src_offset = src_window->offset +
		(src_window->nr_pages << PAGE_SHIFT);
	end_dst_offset = dst_window->offset +
		(dst_window->nr_pages << PAGE_SHIFT);
	while (remaining_len) {
		if (src_offset == end_src_offset) {
			src_window = list_entry_next(src_window, list);
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(src_window, &src_win_iter);
		}
		if (dst_offset == end_dst_offset) {
			dst_window = list_entry_next(dst_window, list);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(dst_window, &dst_win_iter);
		}

		/* compute dma addresses for transfer */
		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
						    &src_contig_bytes,
						    &src_win_iter);
		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
						    &dst_contig_bytes,
						    &dst_win_iter);
		loop_len = min(src_contig_bytes, dst_contig_bytes);
		loop_len = min(loop_len, remaining_len);
		if (work->ordered && !(remaining_len - loop_len)) {
			/*
			 * Break up the last chunk of the transfer into two
			 * steps to ensure that the last byte in step 2 is
			 * updated last.
			 */
			/* Step 1) DMA: Body Length - 1 */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len - 1,
							 DMA_PREP_FENCE);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			src_offset += (loop_len - 1);
			dst_offset += (loop_len - 1);
			src_dma_addr += (loop_len - 1);
			dst_dma_addr += (loop_len - 1);
			remaining_len -= (loop_len - 1);
			loop_len = remaining_len;

			/* Step 2) DMA: 1 byte */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		} else {
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
		}
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}
	return ret;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}

/*
 * scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy. Cache line misaligned
 * head and tail bytes are copied by the CPU.
 */
static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
					  struct dma_chan *chan)
{
	dma_addr_t src_dma_addr, dst_dma_addr;
	size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
	size_t dst_contig_bytes = 0;
	int src_cache_off;
	s64 end_src_offset, end_dst_offset;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;
	void *src_virt, *dst_virt;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	int ret = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_device *dev = chan->device;
	dma_cookie_t cookie;

	remaining_len = work->len;
	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);

	src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
	if (src_cache_off != 0) {
		/* Head */
		loop_len = L1_CACHE_BYTES - src_cache_off;
		loop_len = min(loop_len, remaining_len);
		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!src_virt)
			return -ENOMEM;
		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!dst_virt) {
			if (src_window->type != SCIF_WINDOW_SELF)
				iounmap_remote(src_virt, loop_len, work);
			return -ENOMEM;
		}
		if (src_window->type == SCIF_WINDOW_SELF)
			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
						remaining_len == loop_len ?
						work->ordered : false);
		else
			scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
						  remaining_len == loop_len ?
						  work->ordered : false);
		if (src_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(src_virt, loop_len, work);
		if (dst_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(dst_virt, loop_len, work);
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}

	end_src_offset = src_window->offset +
		(src_window->nr_pages << PAGE_SHIFT);
	end_dst_offset = dst_window->offset +
		(dst_window->nr_pages << PAGE_SHIFT);
	tail_len = remaining_len & (L1_CACHE_BYTES - 1);
	remaining_len -= tail_len;
	while (remaining_len) {
		if (src_offset == end_src_offset) {
			src_window = list_entry_next(src_window, list);
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(src_window, &src_win_iter);
		}
		if (dst_offset == end_dst_offset) {
			dst_window = list_entry_next(dst_window, list);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			scif_init_window_iter(dst_window, &dst_win_iter);
		}

		/* compute dma addresses for transfer */
		src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
						    &src_contig_bytes,
						    &src_win_iter);
		dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
						    &dst_contig_bytes,
						    &dst_win_iter);
		loop_len = min(src_contig_bytes, dst_contig_bytes);
		loop_len = min(loop_len, remaining_len);
		if (work->ordered && !tail_len &&
		    !(remaining_len - loop_len)) {
			/*
			 * Break up the last chunk of the transfer into two
			 * steps if there is no tail, to guarantee DMA
			 * ordering. Passing DMA_PREP_FENCE inserts a status
			 * update descriptor in step 1 which acts as a double
			 * sided synchronization fence for the DMA engine to
			 * ensure that the last cache line in step 2 is
			 * updated last.
			 */
			/* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len -
							 L1_CACHE_BYTES,
							 DMA_PREP_FENCE);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
			src_offset += (loop_len - L1_CACHE_BYTES);
			dst_offset += (loop_len - L1_CACHE_BYTES);
			src_dma_addr += (loop_len - L1_CACHE_BYTES);
			dst_dma_addr += (loop_len - L1_CACHE_BYTES);
			remaining_len -= (loop_len - L1_CACHE_BYTES);
			loop_len = remaining_len;

			/* Step 2) DMA: L1_CACHE_BYTES */
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		} else {
			tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
							 src_dma_addr,
							 loop_len, 0);
			if (!tx) {
				ret = -ENOMEM;
				goto err;
			}
			cookie = tx->tx_submit(tx);
			if (dma_submit_error(cookie)) {
				ret = -ENOMEM;
				goto err;
			}
			dma_async_issue_pending(chan);
		}
		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
	}
	remaining_len = tail_len;
	if (remaining_len) {
		loop_len = remaining_len;
		if (src_offset == end_src_offset)
			src_window = list_entry_next(src_window, list);
		if (dst_offset == end_dst_offset)
			dst_window = list_entry_next(dst_window, list);

		src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
		dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
		/*
		 * The CPU copy for the tail bytes must be initiated only
		 * after previous DMA transfers for this endpoint have
		 * completed to guarantee ordering.
		 */
		if (work->ordered) {
			struct scif_dev *rdev = work->remote_dev;

			ret = scif_drain_dma_poll(rdev->sdev, chan);
			if (ret)
				return ret;
		}
		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!src_virt)
			return -ENOMEM;

		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev, NULL);
		if (!dst_virt) {
			if (src_window->type != SCIF_WINDOW_SELF)
				iounmap_remote(src_virt, loop_len, work);
			return -ENOMEM;
		}

		if (src_window->type == SCIF_WINDOW_SELF)
			scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
						work->ordered);
		else
			scif_unaligned_cpy_fromio(dst_virt, src_virt,
						  loop_len, work->ordered);
		if (src_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(src_virt, loop_len, work);

		if (dst_window->type != SCIF_WINDOW_SELF)
			iounmap_remote(dst_virt, loop_len, work);
		remaining_len -= loop_len;
	}
	return ret;
err:
	dev_err(scif_info.mdev.this_device,
		"%s %d Desc Prog Failed ret %d\n",
		__func__, __LINE__, ret);
	return ret;
}

/*
 * scif_rma_list_cpu_copy:
 *
 * Traverse all the windows and perform CPU copy.
 */
static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
{
	void *src_virt, *dst_virt;
	size_t loop_len, remaining_len;
	int src_page_off, dst_page_off;
	s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
	struct scif_window *src_window = work->src_window;
	struct scif_window *dst_window = work->dst_window;
	s64 end_src_offset, end_dst_offset;
	int ret = 0;
	struct scif_window_iter src_win_iter;
	struct scif_window_iter dst_win_iter;

	remaining_len = work->len;

	scif_init_window_iter(src_window, &src_win_iter);
	scif_init_window_iter(dst_window, &dst_win_iter);
	while (remaining_len) {
		src_page_off = src_offset & ~PAGE_MASK;
		dst_page_off = dst_offset & ~PAGE_MASK;
		loop_len = min(PAGE_SIZE -
			       max(src_page_off, dst_page_off),
			       remaining_len);

		if (src_window->type == SCIF_WINDOW_SELF)
			src_virt = _get_local_va(src_offset, src_window,
						 loop_len);
		else
			src_virt = ioremap_remote(src_offset, src_window,
						  loop_len,
						  work->remote_dev,
						  &src_win_iter);
		if (!src_virt) {
			ret = -ENOMEM;
			goto error;
		}

		if (dst_window->type == SCIF_WINDOW_SELF)
			dst_virt = _get_local_va(dst_offset, dst_window,
						 loop_len);
		else
			dst_virt = ioremap_remote(dst_offset, dst_window,
						  loop_len,
						  work->remote_dev,
						  &dst_win_iter);
		if (!dst_virt) {
			if (src_window->type == SCIF_WINDOW_PEER)
				iounmap_remote(src_virt, loop_len, work);
			ret = -ENOMEM;
			goto error;
		}

		if (work->loopback) {
			memcpy(dst_virt, src_virt, loop_len);
		} else {
			if (src_window->type == SCIF_WINDOW_SELF)
				memcpy_toio((void __iomem __force *)dst_virt,
					    src_virt, loop_len);
			else
				memcpy_fromio(dst_virt,
					      (void __iomem __force *)src_virt,
					      loop_len);
		}
		if (src_window->type == SCIF_WINDOW_PEER)
			iounmap_remote(src_virt, loop_len, work);

		if (dst_window->type == SCIF_WINDOW_PEER)
			iounmap_remote(dst_virt, loop_len, work);

		src_offset += loop_len;
		dst_offset += loop_len;
		remaining_len -= loop_len;
		if (remaining_len) {
			end_src_offset = src_window->offset +
				(src_window->nr_pages << PAGE_SHIFT);
			end_dst_offset = dst_window->offset +
				(dst_window->nr_pages << PAGE_SHIFT);
			if (src_offset == end_src_offset) {
				src_window = list_entry_next(src_window, list);
				scif_init_window_iter(src_window,
						      &src_win_iter);
			}
			if (dst_offset == end_dst_offset) {
				dst_window = list_entry_next(dst_window, list);
				scif_init_window_iter(dst_window,
						      &dst_win_iter);
			}
		}
	}
error:
	return ret;
}

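/*
 * scif_rma_list_dma_copy_wrapper:
 *
 * Dispatch an RMA copy: the fully aligned DMA path is used when the DMA
 * engine supports byte granularity copies, the cache line aligned path
 * when source and destination share the same cache line offset, a CPU
 * copy for loopback, and otherwise the transfer is bounced through a
 * cache line aligned temporary buffer.
 */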
1568 | static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd, | |
1569 | struct scif_copy_work *work, | |
1570 | struct dma_chan *chan, off_t loffset) | |
1571 | { | |
1572 | int src_cache_off, dst_cache_off; | |
1573 | s64 src_offset = work->src_offset, dst_offset = work->dst_offset; | |
1574 | u8 *temp = NULL; | |
1575 | bool src_local = true, dst_local = false; | |
1576 | struct scif_dma_comp_cb *comp_cb; | |
1577 | dma_addr_t src_dma_addr, dst_dma_addr; | |
1578 | int err; | |
1579 | ||
1580 | if (is_dma_copy_aligned(chan->device, 1, 1, 1)) | |
1581 | return _scif_rma_list_dma_copy_aligned(work, chan); | |
1582 | ||
1583 | src_cache_off = src_offset & (L1_CACHE_BYTES - 1); | |
1584 | dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1); | |
1585 | ||
1586 | if (dst_cache_off == src_cache_off) | |
1587 | return scif_rma_list_dma_copy_aligned(work, chan); | |
1588 | ||
1589 | if (work->loopback) | |
1590 | return scif_rma_list_cpu_copy(work); | |
1591 | src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset); | |
1592 | dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset); | |
1593 | src_local = work->src_window->type == SCIF_WINDOW_SELF; | |
1594 | dst_local = work->dst_window->type == SCIF_WINDOW_SELF; | |
1595 | ||
1596 | dst_local = dst_local; | |
1597 | /* Allocate dma_completion cb */ | |
1598 | comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL); | |
1599 | if (!comp_cb) | |
1600 | goto error; | |
1601 | ||
1602 | work->comp_cb = comp_cb; | |
1603 | comp_cb->cb_cookie = comp_cb; | |
1604 | comp_cb->dma_completion_func = &scif_rma_completion_cb; | |
1605 | ||
1606 | if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) { | |
1607 | comp_cb->is_cache = false; | |
1608 | /* Allocate padding bytes to align to a cache line */ | |
1609 | temp = kmalloc(work->len + (L1_CACHE_BYTES << 1), | |
1610 | GFP_KERNEL); | |
1611 | if (!temp) | |
1612 | goto free_comp_cb; | |
1613 | comp_cb->temp_buf_to_free = temp; | |
1614 | /* kmalloc(..) does not guarantee cache line alignment */ | |
1615 | temp = PTR_ALIGN(temp, L1_CACHE_BYTES); | |
1617 | } else { | |
1618 | comp_cb->is_cache = true; | |
1619 | temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL); | |
1620 | if (!temp) | |
1621 | goto free_comp_cb; | |
1622 | comp_cb->temp_buf_to_free = temp; | |
1623 | } | |
1624 | ||
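| /* | |
| * With a local source the CPU stages the payload into the bounce | |
| * buffer at the destination's cache line offset, so the DMA to the | |
| * remote window is cache line aligned. With a remote source the | |
| * source offset is rounded down to a cache line and the DMA lands | |
| * in the bounce buffer; the completion callback then copies the | |
| * payload past header_padding into the real destination window. | |
| */ | |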
1625 | if (src_local) { | |
1626 | temp += dst_cache_off; | |
1627 | scif_rma_local_cpu_copy(work->src_offset, work->src_window, | |
1628 | temp, work->len, true); | |
1629 | } else { | |
1630 | comp_cb->dst_window = work->dst_window; | |
1631 | comp_cb->dst_offset = work->dst_offset; | |
1632 | work->src_offset = work->src_offset - src_cache_off; | |
1633 | comp_cb->len = work->len; | |
1634 | work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES); | |
1635 | comp_cb->header_padding = src_cache_off; | |
1636 | } | |
1637 | comp_cb->temp_buf = temp; | |
1638 | ||
1639 | err = scif_map_single(&comp_cb->temp_phys, temp, | |
1640 | work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE); | |
1641 | if (err) | |
1642 | goto free_temp_buf; | |
1643 | comp_cb->sdev = work->remote_dev; | |
1644 | if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0) | |
1645 | goto unmap_temp_buf; | |
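| /* | |
| * A remote source requires an interrupt based fence so that the | |
| * completion callback runs and drains the bounce buffer into the | |
| * destination window. | |
| */ | |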
1646 | if (!src_local) | |
1647 | work->fence_type = SCIF_DMA_INTR; | |
1648 | return 0; | |
| unmap_temp_buf: | |
| /* mirror the unmap otherwise done by the DMA completion callback */ | |
| scif_unmap_single(comp_cb->temp_phys, work->remote_dev, | |
| SCIF_KMEM_UNALIGNED_BUF_SIZE); | |
1649 | free_temp_buf: | |
1650 | if (comp_cb->is_cache) | |
1651 | kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free); | |
1652 | else | |
1653 | kfree(comp_cb->temp_buf_to_free); | |
1654 | free_comp_cb: | |
1655 | kfree(comp_cb); | |
1656 | error: | |
1657 | return -ENOMEM; | |
1658 | } | |
1659 | ||
1660 | /** | |
1661 | * scif_rma_copy: | |
1662 | * @epd: end point descriptor. | |
1663 | * @loffset: offset in local registered address space to/from which to copy | |
1664 | * @addr: user virtual address to/from which to copy | |
1665 | * @len: length of range to copy | |
1666 | * @roffset: offset in remote registered address space to/from which to copy | |
1667 | * @flags: SCIF_RMA_USECPU, SCIF_RMA_USECACHE, SCIF_RMA_SYNC, SCIF_RMA_ORDERED | |
1668 | * @dir: SCIF_LOCAL_TO_REMOTE or SCIF_REMOTE_TO_LOCAL | |
1669 | * @last_chunk: true if this is the last chunk of a larger transfer | |
1670 | * | |
1671 | * Validate parameters, check if src/dst registered ranges requested for copy | |
1672 | * are valid and initiate either CPU or DMA copy. | |
1673 | */ | |
1674 | static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr, | |
1675 | size_t len, off_t roffset, int flags, | |
1676 | enum scif_rma_dir dir, bool last_chunk) | |
1677 | { | |
1678 | struct scif_endpt *ep = (struct scif_endpt *)epd; | |
1679 | struct scif_rma_req remote_req; | |
1680 | struct scif_rma_req req; | |
1681 | struct scif_window *local_window = NULL; | |
1682 | struct scif_window *remote_window = NULL; | |
1683 | struct scif_copy_work copy_work; | |
1684 | bool loopback; | |
1685 | int err = 0; | |
1686 | struct dma_chan *chan; | |
1687 | struct scif_mmu_notif *mmn = NULL; | |
1688 | bool cache = false; | |
1689 | struct device *spdev; | |
1690 | ||
1691 | err = scif_verify_epd(ep); | |
1692 | if (err) | |
1693 | return err; | |
1694 | ||
1695 | if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE | | |
1696 | SCIF_RMA_SYNC | SCIF_RMA_ORDERED))) | |
1697 | return -EINVAL; | |
1698 | ||
1699 | loopback = scifdev_self(ep->remote_dev) ? true : false; | |
1700 | copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ? | |
1701 | SCIF_DMA_POLL : 0; | |
1702 | copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk); | |
1703 | ||
1704 | /* Use CPU for Mgmt node <-> Mgmt node copies */ | |
1705 | if (loopback && scif_is_mgmt_node()) { | |
1706 | flags |= SCIF_RMA_USECPU; | |
1707 | copy_work.fence_type = 0x0; | |
1708 | } | |
1709 | ||
1710 | cache = scif_is_set_reg_cache(flags); | |
1711 | ||
1712 | remote_req.out_window = &remote_window; | |
1713 | remote_req.offset = roffset; | |
1714 | remote_req.nr_bytes = len; | |
1715 | /* | |
1716 | * If transfer is from local to remote then the remote window | |
1717 | * must be writeable and vice versa. | |
1718 | */ | |
1719 | remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ; | |
1720 | remote_req.type = SCIF_WINDOW_PARTIAL; | |
1721 | remote_req.head = &ep->rma_info.remote_reg_list; | |
1722 | ||
1723 | spdev = scif_get_peer_dev(ep->remote_dev); | |
1724 | if (IS_ERR(spdev)) { | |
1725 | err = PTR_ERR(spdev); | |
1726 | return err; | |
1727 | } | |
1728 | ||
1729 | if (addr && cache) { | |
1730 | mutex_lock(&ep->rma_info.mmn_lock); | |
1731 | mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); | |
1732 | if (!mmn) | |
1733 | mmn = scif_add_mmu_notifier(current->mm, ep); | |
1734 | mutex_unlock(&ep->rma_info.mmn_lock); | |
1735 | if (IS_ERR(mmn)) { | |
1736 | scif_put_peer_dev(spdev); | |
1737 | return PTR_ERR(mmn); | |
1738 | } | |
1739 | cache = cache && !scif_rma_tc_can_cache(ep, len); | |
1740 | } | |
1741 | mutex_lock(&ep->rma_info.rma_lock); | |
1742 | if (addr) { | |
1743 | req.out_window = &local_window; | |
1744 | req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK), | |
1745 | PAGE_SIZE); | |
1746 | req.va_for_temp = addr & PAGE_MASK; | |
1747 | req.prot = (dir == SCIF_LOCAL_TO_REMOTE ? | |
1748 | VM_READ : VM_WRITE | VM_READ); | |
1749 | /* Does a valid local window exist? */ | |
1750 | if (mmn) { | |
1751 | spin_lock(&ep->rma_info.tc_lock); | |
1752 | req.head = &mmn->tc_reg_list; | |
1753 | err = scif_query_tcw(ep, &req); | |
1754 | spin_unlock(&ep->rma_info.tc_lock); | |
1755 | } | |
1756 | if (!mmn || err) { | |
1757 | err = scif_register_temp(epd, req.va_for_temp, | |
1758 | req.nr_bytes, req.prot, | |
1759 | &loffset, &local_window); | |
1760 | if (err) { | |
1761 | mutex_unlock(&ep->rma_info.rma_lock); | |
1762 | goto error; | |
1763 | } | |
1764 | if (!cache) | |
1765 | goto skip_cache; | |
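| /* | |
| * Account for the new temp cached window and insert it into the | |
| * MMU notifier's list so invalidations tear it down and later | |
| * transfers can reuse it. | |
| */ | |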
1766 | atomic_inc(&ep->rma_info.tcw_refcount); | |
1767 | atomic_add_return(local_window->nr_pages, | |
1768 | &ep->rma_info.tcw_total_pages); | |
1769 | if (mmn) { | |
1770 | spin_lock(&ep->rma_info.tc_lock); | |
1771 | scif_insert_tcw(local_window, | |
1772 | &mmn->tc_reg_list); | |
1773 | spin_unlock(&ep->rma_info.tc_lock); | |
1774 | } | |
1775 | } | |
1776 | skip_cache: | |
1777 | loffset = local_window->offset + | |
1778 | (addr - local_window->va_for_temp); | |
1779 | } else { | |
1780 | req.out_window = &local_window; | |
1781 | req.offset = loffset; | |
1782 | /* | |
1783 | * If transfer is from local to remote then the self window | |
1784 | * must be readable and vice versa. | |
1785 | */ | |
1786 | req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE; | |
1787 | req.nr_bytes = len; | |
1788 | req.type = SCIF_WINDOW_PARTIAL; | |
1789 | req.head = &ep->rma_info.reg_list; | |
1790 | /* Does a valid local window exist? */ | |
1791 | err = scif_query_window(&req); | |
1792 | if (err) { | |
1793 | mutex_unlock(&ep->rma_info.rma_lock); | |
1794 | goto error; | |
1795 | } | |
1796 | } | |
1797 | ||
1798 | /* Does a valid remote window exist? */ | |
1799 | err = scif_query_window(&remote_req); | |
1800 | if (err) { | |
1801 | mutex_unlock(&ep->rma_info.rma_lock); | |
1802 | goto error; | |
1803 | } | |
1804 | ||
1805 | /* | |
1806 | * Prepare copy_work for submitting work to the DMA kernel thread | |
1807 | * or CPU copy routine. | |
1808 | */ | |
1809 | copy_work.len = len; | |
1810 | copy_work.loopback = loopback; | |
1811 | copy_work.remote_dev = ep->remote_dev; | |
1812 | if (dir == SCIF_LOCAL_TO_REMOTE) { | |
1813 | copy_work.src_offset = loffset; | |
1814 | copy_work.src_window = local_window; | |
1815 | copy_work.dst_offset = roffset; | |
1816 | copy_work.dst_window = remote_window; | |
1817 | } else { | |
1818 | copy_work.src_offset = roffset; | |
1819 | copy_work.src_window = remote_window; | |
1820 | copy_work.dst_offset = loffset; | |
1821 | copy_work.dst_window = local_window; | |
1822 | } | |
1823 | ||
1824 | if (flags & SCIF_RMA_USECPU) { | |
1825 | scif_rma_list_cpu_copy(©_work); | |
1826 | } else { | |
1827 | chan = ep->rma_info.dma_chan; | |
1828 | err = scif_rma_list_dma_copy_wrapper(epd, ©_work, | |
1829 | chan, loffset); | |
1830 | } | |
1831 | if (addr && !cache) | |
1832 | atomic_inc(&ep->rma_info.tw_refcount); | |
1833 | ||
1834 | mutex_unlock(&ep->rma_info.rma_lock); | |
1835 | ||
1836 | if (last_chunk) { | |
1837 | struct scif_dev *rdev = ep->remote_dev; | |
1838 | ||
1839 | if (copy_work.fence_type == SCIF_DMA_POLL) | |
1840 | err = scif_drain_dma_poll(rdev->sdev, | |
1841 | ep->rma_info.dma_chan); | |
1842 | else if (copy_work.fence_type == SCIF_DMA_INTR) | |
1843 | err = scif_drain_dma_intr(rdev->sdev, | |
1844 | ep->rma_info.dma_chan); | |
1845 | } | |
1846 | ||
1847 | if (addr && !cache) | |
1848 | scif_queue_for_cleanup(local_window, &scif_info.rma); | |
1849 | scif_put_peer_dev(spdev); | |
1850 | return err; | |
1851 | error: | |
1852 | if (err) { | |
1853 | if (addr && local_window && !cache) | |
1854 | scif_destroy_window(ep, local_window); | |
1855 | dev_err(scif_info.mdev.this_device, | |
1856 | "%s %d err %d len 0x%lx\n", | |
1857 | __func__, __LINE__, err, len); | |
1858 | } | |
1859 | scif_put_peer_dev(spdev); | |
1860 | return err; | |
1861 | } | |
1862 | ||
1863 | int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, | |
1864 | off_t roffset, int flags) | |
1865 | { | |
1866 | int err; | |
1867 | ||
1868 | dev_dbg(scif_info.mdev.this_device, | |
1869 | "SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n", | |
1870 | epd, loffset, len, roffset, flags); | |
1871 | if (scif_unaligned(loffset, roffset)) { | |
1872 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1873 | err = scif_rma_copy(epd, loffset, 0x0, | |
1874 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1875 | roffset, flags, | |
1876 | SCIF_REMOTE_TO_LOCAL, false); | |
1877 | if (err) | |
1878 | goto readfrom_err; | |
1879 | loffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1880 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1881 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1882 | } | |
1883 | } | |
1884 | err = scif_rma_copy(epd, loffset, 0x0, len, | |
1885 | roffset, flags, SCIF_REMOTE_TO_LOCAL, true); | |
1886 | readfrom_err: | |
1887 | return err; | |
1888 | } | |
1889 | EXPORT_SYMBOL_GPL(scif_readfrom); | |
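| /* | |
| * Illustrative usage sketch, not part of the driver. Assumes "epd" is | |
| * a connected endpoint and both peers have registered windows with | |
| * scif_register(); "loff", "roff" and the length are hypothetical. | |
| * | |
| *	size_t len = 4096; | |
| *	off_t loff = 0x100000, roff = 0x200000; | |
| *	int err; | |
| * | |
| *	// Pull len bytes from the peer window at roff into the local | |
| *	// window at loff; SCIF_RMA_SYNC blocks until the DMA is done. | |
| *	err = scif_readfrom(epd, loff, len, roff, SCIF_RMA_SYNC); | |
| *	if (err) | |
| *		pr_err("scif_readfrom failed: %d\n", err); | |
| */ | |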
1890 | ||
1891 | int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, | |
1892 | off_t roffset, int flags) | |
1893 | { | |
1894 | int err; | |
1895 | ||
1896 | dev_dbg(scif_info.mdev.this_device, | |
1897 | "SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n", | |
1898 | epd, loffset, len, roffset, flags); | |
1899 | if (scif_unaligned(loffset, roffset)) { | |
1900 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1901 | err = scif_rma_copy(epd, loffset, 0x0, | |
1902 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1903 | roffset, flags, | |
1904 | SCIF_LOCAL_TO_REMOTE, false); | |
1905 | if (err) | |
1906 | goto writeto_err; | |
1907 | loffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1908 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1909 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1910 | } | |
1911 | } | |
1912 | err = scif_rma_copy(epd, loffset, 0x0, len, | |
1913 | roffset, flags, SCIF_LOCAL_TO_REMOTE, true); | |
1914 | writeto_err: | |
1915 | return err; | |
1916 | } | |
1917 | EXPORT_SYMBOL_GPL(scif_writeto); | |
1918 | ||
1919 | int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, | |
1920 | off_t roffset, int flags) | |
1921 | { | |
1922 | int err; | |
1923 | ||
1924 | dev_dbg(scif_info.mdev.this_device, | |
1925 | "SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", | |
1926 | epd, addr, len, roffset, flags); | |
1927 | if (scif_unaligned((off_t __force)addr, roffset)) { | |
1928 | if (len > SCIF_MAX_UNALIGNED_BUF_SIZE) | |
1929 | flags &= ~SCIF_RMA_USECACHE; | |
1930 | ||
1931 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1932 | err = scif_rma_copy(epd, 0, (u64)addr, | |
1933 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1934 | roffset, flags, | |
1935 | SCIF_REMOTE_TO_LOCAL, false); | |
1936 | if (err) | |
1937 | goto vreadfrom_err; | |
1938 | addr += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1939 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1940 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1941 | } | |
1942 | } | |
1943 | err = scif_rma_copy(epd, 0, (u64)addr, len, | |
1944 | roffset, flags, SCIF_REMOTE_TO_LOCAL, true); | |
1945 | vreadfrom_err: | |
1946 | return err; | |
1947 | } | |
1948 | EXPORT_SYMBOL_GPL(scif_vreadfrom); | |
1949 | ||
1950 | int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, | |
1951 | off_t roffset, int flags) | |
1952 | { | |
1953 | int err; | |
1954 | ||
1955 | dev_dbg(scif_info.mdev.this_device, | |
1956 | "SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", | |
1957 | epd, addr, len, roffset, flags); | |
1958 | if (scif_unaligned((off_t __force)addr, roffset)) { | |
1959 | if (len > SCIF_MAX_UNALIGNED_BUF_SIZE) | |
1960 | flags &= ~SCIF_RMA_USECACHE; | |
1961 | ||
1962 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1963 | err = scif_rma_copy(epd, 0, (u64)addr, | |
1964 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1965 | roffset, flags, | |
1966 | SCIF_LOCAL_TO_REMOTE, false); | |
1967 | if (err) | |
1968 | goto vwriteto_err; | |
1969 | addr += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1970 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1971 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1972 | } | |
1973 | } | |
1974 | err = scif_rma_copy(epd, 0, (u64)addr, len, | |
1975 | roffset, flags, SCIF_LOCAL_TO_REMOTE, true); | |
1976 | vwriteto_err: | |
1977 | return err; | |
1978 | } | |
1979 | EXPORT_SYMBOL_GPL(scif_vwriteto); |
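| /* | |
| * Illustrative usage sketch, not part of the driver. The v* variants | |
| * take a local virtual address instead of a registered offset; the | |
| * driver registers a temporary local window internally. "buf" and | |
| * "roff" are hypothetical. | |
| * | |
| *	void *buf = kmalloc(4096, GFP_KERNEL); | |
| *	off_t roff = 0x200000; | |
| *	int err; | |
| * | |
| *	if (!buf) | |
| *		return -ENOMEM; | |
| *	// Push the buffer to the peer window at roff; SCIF_RMA_USECACHE | |
| *	// lets the driver cache the temporary registration for reuse. | |
| *	err = scif_vwriteto(epd, buf, 4096, roff, SCIF_RMA_USECACHE); | |
| *	if (err) | |
| *		pr_err("scif_vwriteto failed: %d\n", err); | |
| */ | |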