/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */
#include "scif_main.h"
#include "scif_map.h"

/**
 * struct scif_dma_comp_cb - SCIF DMA completion callback
 *
 * @dma_completion_func: DMA completion callback
 * @cb_cookie: DMA completion callback cookie
 * @temp_buf: Temporary buffer
 * @temp_buf_to_free: Temporary buffer to be freed
 * @is_cache: Is a kmem_cache allocated buffer
 * @dst_offset: Destination registration offset
 * @dst_window: Destination registration window
 * @len: Length of the temp buffer
 * @temp_phys: DMA address of the temp buffer
 * @sdev: The SCIF device
 * @header_padding: padding for cache line alignment
 */
struct scif_dma_comp_cb {
        void (*dma_completion_func)(void *cookie);
        void *cb_cookie;
        u8 *temp_buf;
        u8 *temp_buf_to_free;
        bool is_cache;
        s64 dst_offset;
        struct scif_window *dst_window;
        size_t len;
        dma_addr_t temp_phys;
        struct scif_dev *sdev;
        int header_padding;
};

/**
 * struct scif_copy_work - Work for DMA copy
 *
 * @src_offset: Starting source offset
 * @dst_offset: Starting destination offset
 * @src_window: Starting src registered window
 * @dst_window: Starting dst registered window
 * @loopback: true if this is a loopback DMA transfer
 * @len: Length of the transfer
 * @comp_cb: DMA copy completion callback
 * @remote_dev: The remote SCIF peer device
 * @fence_type: polling or interrupt based
 * @ordered: is this a tail byte ordered DMA transfer
 */
struct scif_copy_work {
        s64 src_offset;
        s64 dst_offset;
        struct scif_window *src_window;
        struct scif_window *dst_window;
        int loopback;
        size_t len;
        struct scif_dma_comp_cb *comp_cb;
        struct scif_dev *remote_dev;
        int fence_type;
        bool ordered;
};

/**
 * scif_reserve_dma_chan:
 * @ep: Endpoint Descriptor.
 *
 * This routine reserves a DMA channel for a particular
 * endpoint. All DMA transfers for an endpoint are always
 * programmed on the same DMA channel.
 */
int scif_reserve_dma_chan(struct scif_endpt *ep)
{
        int err = 0;
        struct scif_dev *scifdev;
        struct scif_hw_dev *sdev;
        struct dma_chan *chan;

        /* Loopback DMAs are not supported on the management node */
        if (!scif_info.nodeid && scifdev_self(ep->remote_dev))
                return 0;
        if (scif_info.nodeid)
                scifdev = &scif_dev[0];
        else
                scifdev = ep->remote_dev;
        sdev = scifdev->sdev;
        if (!sdev->num_dma_ch)
                return -ENODEV;
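        /*
         * Pick the next channel round-robin so that endpoints spread
         * across the available DMA channels, while each endpoint keeps
         * all of its transfers on the single channel chosen here.
         */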
        chan = sdev->dma_ch[scifdev->dma_ch_idx];
        scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch;
        mutex_lock(&ep->rma_info.rma_lock);
        ep->rma_info.dma_chan = chan;
        mutex_unlock(&ep->rma_info.rma_lock);
        return err;
}

#ifdef CONFIG_MMU_NOTIFIER
/**
 * __scif_rma_destroy_tcw:
 *
 * This routine destroys temporary cached windows in the range
 * [start, start + len). Called with ep->rma_info.tc_lock held.
 */
static
void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn,
                            struct scif_endpt *ep,
                            u64 start, u64 len)
{
        struct list_head *item, *tmp;
        struct scif_window *window;
        u64 start_va, end_va;
        u64 end = start + len;

        if (end <= start)
                return;

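        /*
         * Tear down every cached window that overlaps [start, end).
         * The early break assumes the list is kept sorted by the
         * window start VA; windows that end before the range starts
         * are simply skipped.
         */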
        list_for_each_safe(item, tmp, &mmn->tc_reg_list) {
                window = list_entry(item, struct scif_window, list);
                ep = (struct scif_endpt *)window->ep;
                if (!len)
                        break;
                start_va = window->va_for_temp;
                end_va = start_va + (window->nr_pages << PAGE_SHIFT);
                if (start < start_va && end <= start_va)
                        break;
                if (start >= end_va)
                        continue;
                __scif_rma_destroy_tcw_helper(window);
        }
}

static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len)
{
        struct scif_endpt *ep = mmn->ep;

        spin_lock(&ep->rma_info.tc_lock);
        __scif_rma_destroy_tcw(mmn, ep, start, len);
        spin_unlock(&ep->rma_info.tc_lock);
}

static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
        struct list_head *item, *tmp;
        struct scif_mmu_notif *mmn;

        list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
                mmn = list_entry(item, struct scif_mmu_notif, list);
                scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
        }
}

static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep)
{
        struct list_head *item, *tmp;
        struct scif_mmu_notif *mmn;

        spin_lock(&ep->rma_info.tc_lock);
        list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) {
                mmn = list_entry(item, struct scif_mmu_notif, list);
                __scif_rma_destroy_tcw(mmn, ep, 0, ULONG_MAX);
        }
        spin_unlock(&ep->rma_info.tc_lock);
}

static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
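        /*
         * Refuse to cache a transfer that alone exceeds the limit; if
         * adding this transfer would push the endpoint past the limit,
         * flush the existing temporary cached windows first and then
         * let the caller cache it.
         */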
        if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit)
                return false;
        if ((atomic_read(&ep->rma_info.tcw_total_pages)
                        + (cur_bytes >> PAGE_SHIFT)) >
                        scif_info.rma_tc_limit) {
                dev_info(scif_info.mdev.this_device,
                         "%s %d total=%d, current=%zu reached max\n",
                         __func__, __LINE__,
                         atomic_read(&ep->rma_info.tcw_total_pages),
                         (1 + (cur_bytes >> PAGE_SHIFT)));
                scif_rma_destroy_tcw_invalid();
                __scif_rma_destroy_tcw_ep(ep);
        }
        return true;
}

static void scif_mmu_notifier_release(struct mmu_notifier *mn,
                                      struct mm_struct *mm)
{
        struct scif_mmu_notif *mmn;

        mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
        scif_rma_destroy_tcw(mmn, 0, ULONG_MAX);
        schedule_work(&scif_info.misc_work);
}

static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long address)
{
        struct scif_mmu_notif *mmn;

        mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
        scif_rma_destroy_tcw(mmn, address, PAGE_SIZE);
}

static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                     struct mm_struct *mm,
                                                     unsigned long start,
                                                     unsigned long end)
{
        struct scif_mmu_notif *mmn;

        mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier);
        scif_rma_destroy_tcw(mmn, start, end - start);
}

static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                   struct mm_struct *mm,
                                                   unsigned long start,
                                                   unsigned long end)
{
        /*
         * Nothing to do here, everything needed was done in
         * invalidate_range_start.
         */
}

static const struct mmu_notifier_ops scif_mmu_notifier_ops = {
        .release = scif_mmu_notifier_release,
        .clear_flush_young = NULL,
        .invalidate_page = scif_mmu_notifier_invalidate_page,
        .invalidate_range_start = scif_mmu_notifier_invalidate_range_start,
        .invalidate_range_end = scif_mmu_notifier_invalidate_range_end};

static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep)
{
        struct scif_endpt_rma_info *rma = &ep->rma_info;
        struct scif_mmu_notif *mmn = NULL;
        struct list_head *item, *tmp;

        mutex_lock(&ep->rma_info.mmn_lock);
        list_for_each_safe(item, tmp, &rma->mmn_list) {
                mmn = list_entry(item, struct scif_mmu_notif, list);
                mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm);
                list_del(item);
                kfree(mmn);
        }
        mutex_unlock(&ep->rma_info.mmn_lock);
}

static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn,
                                   struct mm_struct *mm, struct scif_endpt *ep)
{
        mmn->ep = ep;
        mmn->mm = mm;
        mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops;
        INIT_LIST_HEAD(&mmn->list);
        INIT_LIST_HEAD(&mmn->tc_reg_list);
}

static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma)
{
        struct scif_mmu_notif *mmn;
        struct list_head *item;

        list_for_each(item, &rma->mmn_list) {
                mmn = list_entry(item, struct scif_mmu_notif, list);
                if (mmn->mm == mm)
                        return mmn;
        }
        return NULL;
}

static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
        struct scif_mmu_notif *mmn
                 = kzalloc(sizeof(*mmn), GFP_KERNEL);

        if (!mmn)
                return ERR_PTR(-ENOMEM);

        scif_init_mmu_notifier(mmn, current->mm, ep);
        if (mmu_notifier_register(&mmn->ep_mmu_notifier,
                                  current->mm)) {
                kfree(mmn);
                return ERR_PTR(-EBUSY);
        }
        list_add(&mmn->list, &ep->rma_info.mmn_list);
        return mmn;
}

/*
 * Called from the misc thread to destroy temporary cached windows and
 * unregister the MMU notifier for the SCIF endpoint.
 */
void scif_mmu_notif_handler(struct work_struct *work)
{
        struct list_head *pos, *tmpq;
        struct scif_endpt *ep;
restart:
        scif_rma_destroy_tcw_invalid();
        spin_lock(&scif_info.rmalock);
        list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) {
                ep = list_entry(pos, struct scif_endpt, mmu_list);
                list_del(&ep->mmu_list);
                spin_unlock(&scif_info.rmalock);
                scif_rma_destroy_tcw_ep(ep);
                scif_ep_unregister_mmu_notifier(ep);
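                /*
                 * rmalock was dropped above because the two cleanup
                 * calls can sleep, so restart the walk from the head
                 * of the list.
                 */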
                goto restart;
        }
        spin_unlock(&scif_info.rmalock);
}

static bool scif_is_set_reg_cache(int flags)
{
        return !!(flags & SCIF_RMA_USECACHE);
}
#else
static struct scif_mmu_notif *
scif_find_mmu_notifier(struct mm_struct *mm,
                       struct scif_endpt_rma_info *rma)
{
        return NULL;
}

static struct scif_mmu_notif *
scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep)
{
        return NULL;
}

void scif_mmu_notif_handler(struct work_struct *work)
{
}

static bool scif_is_set_reg_cache(int flags)
{
        return false;
}

static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes)
{
        return false;
}
#endif

/**
 * scif_register_temp:
 * @epd: End Point Descriptor.
 * @addr: virtual address to/from which to copy
 * @len: length of range to copy
 * @prot: read/write protection requested for the pinned pages
 * @out_offset: computed offset returned by reference.
 * @out_window: allocated registered window returned by reference.
 *
 * Create a temporary registered window. The peer will not know about this
 * window. This API is used for scif_vreadfrom()/scif_vwriteto() API's.
 */
static int
scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot,
                   off_t *out_offset, struct scif_window **out_window)
{
        struct scif_endpt *ep = (struct scif_endpt *)epd;
        int err;
        scif_pinned_pages_t pinned_pages;
        size_t aligned_len;

        aligned_len = ALIGN(len, PAGE_SIZE);

        err = __scif_pin_pages((void *)(addr & PAGE_MASK),
                               aligned_len, &prot, 0, &pinned_pages);
        if (err)
                return err;

        pinned_pages->prot = prot;

        /* Compute the offset for this registration */
        err = scif_get_window_offset(ep, 0, 0,
                                     aligned_len >> PAGE_SHIFT,
                                     (s64 *)out_offset);
        if (err)
                goto error_unpin;

        /* Allocate and prepare self registration window */
        *out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT,
                                         *out_offset, true);
        if (!*out_window) {
                scif_free_window_offset(ep, NULL, *out_offset);
                err = -ENOMEM;
                goto error_unpin;
        }

        (*out_window)->pinned_pages = pinned_pages;
        (*out_window)->nr_pages = pinned_pages->nr_pages;
        (*out_window)->prot = pinned_pages->prot;

        (*out_window)->va_for_temp = addr & PAGE_MASK;
        err = scif_map_window(ep->remote_dev, *out_window);
        if (err) {
                /* Something went wrong! Rollback */
                scif_destroy_window(ep, *out_window);
                *out_window = NULL;
        } else {
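                /*
                 * Fold the sub-page offset of the original address back
                 * into the page-aligned window offset returned to the
                 * caller.
                 */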
                *out_offset |= (addr - (*out_window)->va_for_temp);
        }
        return err;
error_unpin:
        if (err)
                dev_err(&ep->remote_dev->sdev->dev,
                        "%s %d err %d\n", __func__, __LINE__, err);
        scif_unpin_pages(pinned_pages);
        return err;
}

#define SCIF_DMA_TO (3 * HZ)

/*
 * scif_sync_dma - Program a DMA without an interrupt descriptor
 *
 * @sdev - The SCIF hardware device used for DMA registration.
 * @chan - DMA channel to be used.
 * @sync_wait - Wait for DMA to complete?
 *
 * Return 0 on success and -errno on error.
 */
static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan,
                         bool sync_wait)
{
        int err = 0;
        struct dma_async_tx_descriptor *tx = NULL;
        enum dma_ctrl_flags flags = DMA_PREP_FENCE;
        dma_cookie_t cookie;
        struct dma_device *ddev;

        if (!chan) {
                err = -EIO;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        ddev = chan->device;

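        /*
         * Submit a zero-length memcpy descriptor purely for
         * synchronization: waiting for it to complete below is what
         * drains previously submitted work on this channel.
         */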
        tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto release;
        }
        cookie = tx->tx_submit(tx);

        if (dma_submit_error(cookie)) {
                err = -ENOMEM;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto release;
        }
        if (!sync_wait) {
                dma_async_issue_pending(chan);
        } else {
                if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) {
                        err = 0;
                } else {
                        err = -EIO;
                        dev_err(&sdev->dev, "%s %d err %d\n",
                                __func__, __LINE__, err);
                }
        }
release:
        return err;
}

static void scif_dma_callback(void *arg)
{
        struct completion *done = (struct completion *)arg;

        complete(done);
}

#define SCIF_DMA_SYNC_WAIT true
#define SCIF_DMA_POLL BIT(0)
#define SCIF_DMA_INTR BIT(1)

/*
 * scif_async_dma - Program a DMA with an interrupt descriptor
 *
 * @sdev - The SCIF hardware device used for DMA registration.
 * @chan - DMA channel to be used.
 * Return 0 on success and -errno on error.
 */
static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
        int err = 0;
        struct dma_device *ddev;
        struct dma_async_tx_descriptor *tx = NULL;
        enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
        DECLARE_COMPLETION_ONSTACK(done_wait);
        dma_cookie_t cookie;
        enum dma_status status;

        if (!chan) {
                err = -EIO;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                return err;
        }
        ddev = chan->device;

        tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags);
        if (!tx) {
                err = -ENOMEM;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto release;
        }
        reinit_completion(&done_wait);
        tx->callback = scif_dma_callback;
        tx->callback_param = &done_wait;
        cookie = tx->tx_submit(tx);

        if (dma_submit_error(cookie)) {
                err = -ENOMEM;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto release;
        }
        dma_async_issue_pending(chan);

        err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO);
        if (!err) {
                err = -EIO;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto release;
        }
        err = 0;
        status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
        if (status != DMA_COMPLETE) {
                err = -EIO;
                dev_err(&sdev->dev, "%s %d err %d\n",
                        __func__, __LINE__, err);
                goto release;
        }
release:
        return err;
}

/*
 * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular
 * DMA channel via polling.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
        if (!chan)
                return -EINVAL;
        return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT);
}

/*
 * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular
 * DMA channel via interrupt based blocking wait.
 *
 * @sdev - The SCIF device
 * @chan - DMA channel
 * Return 0 on success and -errno on error.
 */
int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan)
{
        if (!chan)
                return -EINVAL;
        return scif_async_dma(sdev, chan);
}

/**
 * scif_rma_destroy_windows:
 *
 * This routine destroys all windows queued for cleanup
 */
void scif_rma_destroy_windows(void)
{
        struct list_head *item, *tmp;
        struct scif_window *window;
        struct scif_endpt *ep;
        struct dma_chan *chan;

        might_sleep();
restart:
        spin_lock(&scif_info.rmalock);
        list_for_each_safe(item, tmp, &scif_info.rma) {
                window = list_entry(item, struct scif_window,
                                    list);
                ep = (struct scif_endpt *)window->ep;
                chan = ep->rma_info.dma_chan;

                list_del_init(&window->list);
                spin_unlock(&scif_info.rmalock);
                if (!chan || !scifdev_alive(ep) ||
                    !scif_drain_dma_intr(ep->remote_dev->sdev,
                                         ep->rma_info.dma_chan))
                        /* Remove window from global list */
                        window->unreg_state = OP_COMPLETED;
                else
                        dev_warn(&ep->remote_dev->sdev->dev,
                                 "DMA engine hung?\n");
                if (window->unreg_state == OP_COMPLETED) {
                        if (window->type == SCIF_WINDOW_SELF)
                                scif_destroy_window(ep, window);
                        else
                                scif_destroy_remote_window(window);
                        atomic_dec(&ep->rma_info.tw_refcount);
                }
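                /* The lock was dropped above, so rescan from the head. */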
                goto restart;
        }
        spin_unlock(&scif_info.rmalock);
}

/**
 * scif_rma_destroy_tcw_invalid:
 *
 * This routine destroys temporary cached registered windows
 * which have been queued for cleanup.
 */
void scif_rma_destroy_tcw_invalid(void)
{
        struct list_head *item, *tmp;
        struct scif_window *window;
        struct scif_endpt *ep;
        struct dma_chan *chan;

        might_sleep();
restart:
        spin_lock(&scif_info.rmalock);
        list_for_each_safe(item, tmp, &scif_info.rma_tc) {
                window = list_entry(item, struct scif_window, list);
                ep = (struct scif_endpt *)window->ep;
                chan = ep->rma_info.dma_chan;
                list_del_init(&window->list);
                spin_unlock(&scif_info.rmalock);
                mutex_lock(&ep->rma_info.rma_lock);
                if (!chan || !scifdev_alive(ep) ||
                    !scif_drain_dma_intr(ep->remote_dev->sdev,
                                         ep->rma_info.dma_chan)) {
                        atomic_sub(window->nr_pages,
                                   &ep->rma_info.tcw_total_pages);
                        scif_destroy_window(ep, window);
                        atomic_dec(&ep->rma_info.tcw_refcount);
                } else {
                        dev_warn(&ep->remote_dev->sdev->dev,
                                 "DMA engine hung?\n");
                }
                mutex_unlock(&ep->rma_info.rma_lock);
                goto restart;
        }
        spin_unlock(&scif_info.rmalock);
}

static inline
void *_get_local_va(off_t off, struct scif_window *window, size_t len)
{
        int page_nr = (off - window->offset) >> PAGE_SHIFT;
        off_t page_off = off & ~PAGE_MASK;
        void *va = NULL;

        if (window->type == SCIF_WINDOW_SELF) {
                struct page **pages = window->pinned_pages->pages;

                va = page_address(pages[page_nr]) + page_off;
        }
        return va;
}

static inline
void *ioremap_remote(off_t off, struct scif_window *window,
                     size_t len, struct scif_dev *dev,
                     struct scif_window_iter *iter)
{
        dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter);

        /*
         * If the DMA address is not card relative then we need the DMA
         * addresses to be an offset into the bar. The aperture base was
         * already added so subtract it here since scif_ioremap is going
         * to add it again.
         */
        if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER &&
            dev->sdev->aper && !dev->sdev->card_rel_da)
                phys = phys - dev->sdev->aper->pa;
        return scif_ioremap(phys, len, dev);
}

static inline void
iounmap_remote(void *virt, size_t size, struct scif_copy_work *work)
{
        scif_iounmap(virt, size, work->remote_dev);
}

/*
 * Takes care of ordering issues caused by
 * 1. Hardware: Only in the case of cpu copy from mgmt node to card
 *    because of WC memory.
 * 2. Software: If memcpy reorders copy instructions for optimization.
 *    This could happen at both mgmt node and card.
 */
static inline void
scif_ordered_memcpy_toio(char *dst, const char *src, size_t count)
{
        if (!count)
                return;

        memcpy_toio((void __iomem __force *)dst, src, --count);
        /* Order the last byte with the previous stores */
        wmb();
        *(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_toio(char *dst, const char *src,
                                           size_t count, bool ordered)
{
        if (ordered)
                scif_ordered_memcpy_toio(dst, src, count);
        else
                memcpy_toio((void __iomem __force *)dst, src, count);
}

static inline
void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count)
{
        if (!count)
                return;

        memcpy_fromio(dst, (void __iomem __force *)src, --count);
        /* Order the last byte with the previous loads */
        rmb();
        *(dst + count) = *(src + count);
}

static inline void scif_unaligned_cpy_fromio(char *dst, const char *src,
                                             size_t count, bool ordered)
{
        if (ordered)
                scif_ordered_memcpy_fromio(dst, src, count);
        else
                memcpy_fromio(dst, (void __iomem __force *)src, count);
}

#define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0)

/*
 * scif_off_to_dma_addr:
 * Obtain the dma_addr given the window and the offset.
 * @window: Registered window.
 * @off: Window offset.
 * @nr_bytes: Return the number of contiguous bytes till next DMA addr index.
 * @iter: Optional iterator caching the index and start offset of the
 *        dma_addr array entry found.
 * The nr_bytes value gives the caller an estimate of the maximum possible
 * DMA xfer while the iterator provides faster lookups for the next
 * iteration.
 */
dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off,
                                size_t *nr_bytes, struct scif_window_iter *iter)
{
        int i, page_nr;
        s64 start, end;
        off_t page_off;

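        /*
         * Fast path: when every page is its own contiguous chunk, the
         * page number indexes the dma_addr array directly.
         */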
        if (window->nr_pages == window->nr_contig_chunks) {
                page_nr = (off - window->offset) >> PAGE_SHIFT;
                page_off = off & ~PAGE_MASK;

                if (nr_bytes)
                        *nr_bytes = PAGE_SIZE - page_off;
                return window->dma_addr[page_nr] | page_off;
        }
        if (iter) {
                i = iter->index;
                start = iter->offset;
        } else {
                i = 0;
                start = window->offset;
        }
        for (; i < window->nr_contig_chunks; i++) {
                end = start + (window->num_pages[i] << PAGE_SHIFT);
                if (off >= start && off < end) {
                        if (iter) {
                                iter->index = i;
                                iter->offset = start;
                        }
                        if (nr_bytes)
                                *nr_bytes = end - off;
                        return (window->dma_addr[i] + (off - start));
                }
                start += (window->num_pages[i] << PAGE_SHIFT);
        }
        dev_err(scif_info.mdev.this_device,
                "%s %d BUG. Addr not found? window %p off 0x%llx\n",
                __func__, __LINE__, window, off);
        return SCIF_RMA_ERROR_CODE;
}

/*
 * Copy between rma window and temporary buffer
 */
static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window,
                                    u8 *temp, size_t rem_len, bool to_temp)
{
        void *window_virt;
        size_t loop_len;
        int offset_in_page;
        s64 end_offset;

        offset_in_page = offset & ~PAGE_MASK;
        loop_len = PAGE_SIZE - offset_in_page;

        if (rem_len < loop_len)
                loop_len = rem_len;

        window_virt = _get_local_va(offset, window, loop_len);
        if (!window_virt)
                return;
        if (to_temp)
                memcpy(temp, window_virt, loop_len);
        else
                memcpy(window_virt, temp, loop_len);

        offset += loop_len;
        temp += loop_len;
        rem_len -= loop_len;

        end_offset = window->offset +
                (window->nr_pages << PAGE_SHIFT);
        while (rem_len) {
                if (offset == end_offset) {
                        window = list_next_entry(window, list);
                        end_offset = window->offset +
                                (window->nr_pages << PAGE_SHIFT);
                }
                loop_len = min(PAGE_SIZE, rem_len);
                window_virt = _get_local_va(offset, window, loop_len);
                if (!window_virt)
                        return;
                if (to_temp)
                        memcpy(temp, window_virt, loop_len);
                else
                        memcpy(window_virt, temp, loop_len);
                offset += loop_len;
                temp += loop_len;
                rem_len -= loop_len;
        }
}

/**
 * scif_rma_completion_cb:
 * @data: RMA cookie
 *
 * RMA interrupt completion callback.
 */
static void scif_rma_completion_cb(void *data)
{
        struct scif_dma_comp_cb *comp_cb = data;

        /* Free DMA Completion CB. */
        if (comp_cb->dst_window)
                scif_rma_local_cpu_copy(comp_cb->dst_offset,
                                        comp_cb->dst_window,
                                        comp_cb->temp_buf +
                                        comp_cb->header_padding,
                                        comp_cb->len, false);
        scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev,
                          SCIF_KMEM_UNALIGNED_BUF_SIZE);
        if (comp_cb->is_cache)
                kmem_cache_free(unaligned_cache,
                                comp_cb->temp_buf_to_free);
        else
                kfree(comp_cb->temp_buf_to_free);
}

/* Copies between temporary buffer and offsets provided in work */
static int
scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work,
                                 u8 *temp, struct dma_chan *chan,
                                 bool src_local)
{
        struct scif_dma_comp_cb *comp_cb = work->comp_cb;
        dma_addr_t window_dma_addr, temp_dma_addr;
        dma_addr_t temp_phys = comp_cb->temp_phys;
        size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len;
        int offset_in_ca, ret = 0;
        s64 end_offset, offset;
        struct scif_window *window;
        void *window_virt_addr;
        size_t tail_len;
        struct dma_async_tx_descriptor *tx;
        struct dma_device *dev = chan->device;
        dma_cookie_t cookie;

        if (src_local) {
                offset = work->dst_offset;
                window = work->dst_window;
        } else {
                offset = work->src_offset;
                window = work->src_window;
        }

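        /*
         * The transfer is split into an unaligned head and tail that
         * are copied by the CPU and a cache-line aligned body that is
         * handed to the DMA engine.
         */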
        offset_in_ca = offset & (L1_CACHE_BYTES - 1);
        if (offset_in_ca) {
                loop_len = L1_CACHE_BYTES - offset_in_ca;
                loop_len = min(loop_len, remaining_len);
                window_virt_addr = ioremap_remote(offset, window,
                                                  loop_len,
                                                  work->remote_dev,
                                                  NULL);
                if (!window_virt_addr)
                        return -ENOMEM;
                if (src_local)
                        scif_unaligned_cpy_toio(window_virt_addr, temp,
                                                loop_len,
                                                work->ordered &&
                                                !(remaining_len - loop_len));
                else
                        scif_unaligned_cpy_fromio(temp, window_virt_addr,
                                                  loop_len, work->ordered &&
                                                  !(remaining_len - loop_len));
                iounmap_remote(window_virt_addr, loop_len, work);

                offset += loop_len;
                temp += loop_len;
                temp_phys += loop_len;
                remaining_len -= loop_len;
        }

        offset_in_ca = offset & ~PAGE_MASK;
        end_offset = window->offset +
                (window->nr_pages << PAGE_SHIFT);

        tail_len = remaining_len & (L1_CACHE_BYTES - 1);
        remaining_len -= tail_len;
        while (remaining_len) {
                if (offset == end_offset) {
                        window = list_next_entry(window, list);
                        end_offset = window->offset +
                                (window->nr_pages << PAGE_SHIFT);
                }
                if (scif_is_mgmt_node())
                        temp_dma_addr = temp_phys;
                else
                        /* Fix if we ever enable IOMMU on the card */
                        temp_dma_addr = (dma_addr_t)virt_to_phys(temp);
                window_dma_addr = scif_off_to_dma_addr(window, offset,
                                                       &nr_contig_bytes,
                                                       NULL);
                loop_len = min(nr_contig_bytes, remaining_len);
                if (src_local) {
                        if (work->ordered && !tail_len &&
                            !(remaining_len - loop_len) &&
                            loop_len != L1_CACHE_BYTES) {
                                /*
                                 * Break up the last chunk of the transfer
                                 * into two steps if there is no tail, to
                                 * guarantee DMA ordering. SCIF_DMA_POLLING
                                 * inserts a status update descriptor in
                                 * step 1 which acts as a double sided
                                 * synchronization fence for the DMA engine
                                 * to ensure that the last cache line in
                                 * step 2 is updated last.
                                 */
                                /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
                                tx =
                                dev->device_prep_dma_memcpy(chan,
                                                            window_dma_addr,
                                                            temp_dma_addr,
                                                            loop_len -
                                                            L1_CACHE_BYTES,
                                                            DMA_PREP_FENCE);
                                if (!tx) {
                                        ret = -ENOMEM;
                                        goto err;
                                }
                                cookie = tx->tx_submit(tx);
                                if (dma_submit_error(cookie)) {
                                        ret = -ENOMEM;
                                        goto err;
                                }
                                dma_async_issue_pending(chan);
                                offset += (loop_len - L1_CACHE_BYTES);
                                temp_dma_addr += (loop_len - L1_CACHE_BYTES);
                                window_dma_addr += (loop_len - L1_CACHE_BYTES);
                                remaining_len -= (loop_len - L1_CACHE_BYTES);
                                loop_len = remaining_len;

                                /* Step 2) DMA: L1_CACHE_BYTES */
                                tx =
                                dev->device_prep_dma_memcpy(chan,
                                                            window_dma_addr,
                                                            temp_dma_addr,
                                                            loop_len, 0);
                                if (!tx) {
                                        ret = -ENOMEM;
                                        goto err;
                                }
                                cookie = tx->tx_submit(tx);
                                if (dma_submit_error(cookie)) {
                                        ret = -ENOMEM;
                                        goto err;
                                }
                                dma_async_issue_pending(chan);
                        } else {
                                tx =
                                dev->device_prep_dma_memcpy(chan,
                                                            window_dma_addr,
                                                            temp_dma_addr,
                                                            loop_len, 0);
                                if (!tx) {
                                        ret = -ENOMEM;
                                        goto err;
                                }
                                cookie = tx->tx_submit(tx);
                                if (dma_submit_error(cookie)) {
                                        ret = -ENOMEM;
                                        goto err;
                                }
                                dma_async_issue_pending(chan);
                        }
                } else {
                        tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr,
                                                         window_dma_addr,
                                                         loop_len, 0);
                        if (!tx) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        cookie = tx->tx_submit(tx);
                        if (dma_submit_error(cookie)) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        dma_async_issue_pending(chan);
                }
                if (ret < 0)
                        goto err;
                offset += loop_len;
                temp += loop_len;
                temp_phys += loop_len;
                remaining_len -= loop_len;
                offset_in_ca = 0;
        }
        if (tail_len) {
                if (offset == end_offset) {
                        window = list_next_entry(window, list);
                        end_offset = window->offset +
                                (window->nr_pages << PAGE_SHIFT);
                }
                window_virt_addr = ioremap_remote(offset, window, tail_len,
                                                  work->remote_dev,
                                                  NULL);
                if (!window_virt_addr)
                        return -ENOMEM;
                /*
                 * The CPU copy for the tail bytes must be initiated only once
                 * previous DMA transfers for this endpoint have completed
                 * to guarantee ordering.
                 */
                if (work->ordered) {
                        struct scif_dev *rdev = work->remote_dev;

                        ret = scif_drain_dma_intr(rdev->sdev, chan);
                        if (ret)
                                return ret;
                }
                if (src_local)
                        scif_unaligned_cpy_toio(window_virt_addr, temp,
                                                tail_len, work->ordered);
                else
                        scif_unaligned_cpy_fromio(temp, window_virt_addr,
                                                  tail_len, work->ordered);
                iounmap_remote(window_virt_addr, tail_len, work);
        }
        tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT);
        if (!tx) {
                ret = -ENOMEM;
                return ret;
        }
        tx->callback = &scif_rma_completion_cb;
        tx->callback_param = comp_cb;
        cookie = tx->tx_submit(tx);

        if (dma_submit_error(cookie)) {
                ret = -ENOMEM;
                return ret;
        }
        dma_async_issue_pending(chan);
        return 0;
err:
        dev_err(scif_info.mdev.this_device,
                "%s %d Desc Prog Failed ret %d\n",
                __func__, __LINE__, ret);
        return ret;
}

/*
 * _scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy.
 */
static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
                                           struct dma_chan *chan)
{
        dma_addr_t src_dma_addr, dst_dma_addr;
        size_t loop_len, remaining_len, src_contig_bytes = 0;
        size_t dst_contig_bytes = 0;
        struct scif_window_iter src_win_iter;
        struct scif_window_iter dst_win_iter;
        s64 end_src_offset, end_dst_offset;
        struct scif_window *src_window = work->src_window;
        struct scif_window *dst_window = work->dst_window;
        s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
        int ret = 0;
        struct dma_async_tx_descriptor *tx;
        struct dma_device *dev = chan->device;
        dma_cookie_t cookie;

        remaining_len = work->len;

        scif_init_window_iter(src_window, &src_win_iter);
        scif_init_window_iter(dst_window, &dst_win_iter);
        end_src_offset = src_window->offset +
                (src_window->nr_pages << PAGE_SHIFT);
        end_dst_offset = dst_window->offset +
                (dst_window->nr_pages << PAGE_SHIFT);
        while (remaining_len) {
                if (src_offset == end_src_offset) {
                        src_window = list_next_entry(src_window, list);
                        end_src_offset = src_window->offset +
                                (src_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(src_window, &src_win_iter);
                }
                if (dst_offset == end_dst_offset) {
                        dst_window = list_next_entry(dst_window, list);
                        end_dst_offset = dst_window->offset +
                                (dst_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(dst_window, &dst_win_iter);
                }

                /* compute dma addresses for transfer */
                src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
                                                    &src_contig_bytes,
                                                    &src_win_iter);
                dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
                                                    &dst_contig_bytes,
                                                    &dst_win_iter);
                loop_len = min(src_contig_bytes, dst_contig_bytes);
                loop_len = min(loop_len, remaining_len);
                if (work->ordered && !(remaining_len - loop_len)) {
                        /*
                         * Break up the last chunk of the transfer into two
                         * steps to ensure that the last byte in step 2 is
                         * updated last.
                         */
                        /* Step 1) DMA: Body Length - 1 */
                        tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
                                                         src_dma_addr,
                                                         loop_len - 1,
                                                         DMA_PREP_FENCE);
                        if (!tx) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        cookie = tx->tx_submit(tx);
                        if (dma_submit_error(cookie)) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        src_offset += (loop_len - 1);
                        dst_offset += (loop_len - 1);
                        src_dma_addr += (loop_len - 1);
                        dst_dma_addr += (loop_len - 1);
                        remaining_len -= (loop_len - 1);
                        loop_len = remaining_len;

                        /* Step 2) DMA: 1 byte */
                        tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
                                                         src_dma_addr,
                                                         loop_len, 0);
                        if (!tx) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        cookie = tx->tx_submit(tx);
                        if (dma_submit_error(cookie)) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        dma_async_issue_pending(chan);
                } else {
                        tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
                                                         src_dma_addr,
                                                         loop_len, 0);
                        if (!tx) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        cookie = tx->tx_submit(tx);
                        if (dma_submit_error(cookie)) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }
                src_offset += loop_len;
                dst_offset += loop_len;
                remaining_len -= loop_len;
        }
        return ret;
err:
        dev_err(scif_info.mdev.this_device,
                "%s %d Desc Prog Failed ret %d\n",
                __func__, __LINE__, ret);
        return ret;
}

/*
 * scif_rma_list_dma_copy_aligned:
 *
 * Traverse all the windows and perform DMA copy.
 */
static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work,
                                          struct dma_chan *chan)
{
        dma_addr_t src_dma_addr, dst_dma_addr;
        size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0;
        size_t dst_contig_bytes = 0;
        int src_cache_off;
        s64 end_src_offset, end_dst_offset;
        struct scif_window_iter src_win_iter;
        struct scif_window_iter dst_win_iter;
        void *src_virt, *dst_virt;
        struct scif_window *src_window = work->src_window;
        struct scif_window *dst_window = work->dst_window;
        s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
        int ret = 0;
        struct dma_async_tx_descriptor *tx;
        struct dma_device *dev = chan->device;
        dma_cookie_t cookie;

        remaining_len = work->len;
        scif_init_window_iter(src_window, &src_win_iter);
        scif_init_window_iter(dst_window, &dst_win_iter);

        src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
        if (src_cache_off != 0) {
                /* Head */
                loop_len = L1_CACHE_BYTES - src_cache_off;
                loop_len = min(loop_len, remaining_len);
                src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
                dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
                if (src_window->type == SCIF_WINDOW_SELF)
                        src_virt = _get_local_va(src_offset, src_window,
                                                 loop_len);
                else
                        src_virt = ioremap_remote(src_offset, src_window,
                                                  loop_len,
                                                  work->remote_dev, NULL);
                if (!src_virt)
                        return -ENOMEM;
                if (dst_window->type == SCIF_WINDOW_SELF)
                        dst_virt = _get_local_va(dst_offset, dst_window,
                                                 loop_len);
                else
                        dst_virt = ioremap_remote(dst_offset, dst_window,
                                                  loop_len,
                                                  work->remote_dev, NULL);
                if (!dst_virt) {
                        if (src_window->type != SCIF_WINDOW_SELF)
                                iounmap_remote(src_virt, loop_len, work);
                        return -ENOMEM;
                }
                if (src_window->type == SCIF_WINDOW_SELF)
                        scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
                                                remaining_len == loop_len ?
                                                work->ordered : false);
                else
                        scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len,
                                                  remaining_len == loop_len ?
                                                  work->ordered : false);
                if (src_window->type != SCIF_WINDOW_SELF)
                        iounmap_remote(src_virt, loop_len, work);
                if (dst_window->type != SCIF_WINDOW_SELF)
                        iounmap_remote(dst_virt, loop_len, work);
                src_offset += loop_len;
                dst_offset += loop_len;
                remaining_len -= loop_len;
        }

        end_src_offset = src_window->offset +
                (src_window->nr_pages << PAGE_SHIFT);
        end_dst_offset = dst_window->offset +
                (dst_window->nr_pages << PAGE_SHIFT);
        tail_len = remaining_len & (L1_CACHE_BYTES - 1);
        remaining_len -= tail_len;
        while (remaining_len) {
                if (src_offset == end_src_offset) {
                        src_window = list_next_entry(src_window, list);
                        end_src_offset = src_window->offset +
                                (src_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(src_window, &src_win_iter);
                }
                if (dst_offset == end_dst_offset) {
                        dst_window = list_next_entry(dst_window, list);
                        end_dst_offset = dst_window->offset +
                                (dst_window->nr_pages << PAGE_SHIFT);
                        scif_init_window_iter(dst_window, &dst_win_iter);
                }

                /* compute dma addresses for transfer */
                src_dma_addr = scif_off_to_dma_addr(src_window, src_offset,
                                                    &src_contig_bytes,
                                                    &src_win_iter);
                dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset,
                                                    &dst_contig_bytes,
                                                    &dst_win_iter);
                loop_len = min(src_contig_bytes, dst_contig_bytes);
                loop_len = min(loop_len, remaining_len);
                if (work->ordered && !tail_len &&
                    !(remaining_len - loop_len)) {
                        /*
                         * Break up the last chunk of the transfer into two
                         * steps if there is no tail, to guarantee DMA
                         * ordering. Passing SCIF_DMA_POLLING inserts a
                         * status update descriptor in step 1 which acts as
                         * a double sided synchronization fence for the DMA
                         * engine to ensure that the last cache line in
                         * step 2 is updated last.
                         */
                        /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */
                        tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
                                                         src_dma_addr,
                                                         loop_len -
                                                         L1_CACHE_BYTES,
                                                         DMA_PREP_FENCE);
                        if (!tx) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        cookie = tx->tx_submit(tx);
                        if (dma_submit_error(cookie)) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        dma_async_issue_pending(chan);
                        src_offset += (loop_len - L1_CACHE_BYTES);
                        dst_offset += (loop_len - L1_CACHE_BYTES);
                        src_dma_addr += (loop_len - L1_CACHE_BYTES);
                        dst_dma_addr += (loop_len - L1_CACHE_BYTES);
                        remaining_len -= (loop_len - L1_CACHE_BYTES);
                        loop_len = remaining_len;

                        /* Step 2) DMA: L1_CACHE_BYTES */
                        tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
                                                         src_dma_addr,
                                                         loop_len, 0);
                        if (!tx) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        cookie = tx->tx_submit(tx);
                        if (dma_submit_error(cookie)) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        dma_async_issue_pending(chan);
                } else {
                        tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr,
                                                         src_dma_addr,
                                                         loop_len, 0);
                        if (!tx) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        cookie = tx->tx_submit(tx);
                        if (dma_submit_error(cookie)) {
                                ret = -ENOMEM;
                                goto err;
                        }
                        dma_async_issue_pending(chan);
                }
                src_offset += loop_len;
                dst_offset += loop_len;
                remaining_len -= loop_len;
        }
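        /* Tail: the remaining sub-cacheline bytes are copied by the CPU. */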
        remaining_len = tail_len;
        if (remaining_len) {
                loop_len = remaining_len;
                if (src_offset == end_src_offset)
                        src_window = list_next_entry(src_window, list);
                if (dst_offset == end_dst_offset)
                        dst_window = list_next_entry(dst_window, list);

                src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset);
                dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset);
                /*
                 * The CPU copy for the tail bytes must be initiated only once
                 * previous DMA transfers for this endpoint have completed to
                 * guarantee ordering.
                 */
                if (work->ordered) {
                        struct scif_dev *rdev = work->remote_dev;

                        ret = scif_drain_dma_poll(rdev->sdev, chan);
                        if (ret)
                                return ret;
                }
                if (src_window->type == SCIF_WINDOW_SELF)
                        src_virt = _get_local_va(src_offset, src_window,
                                                 loop_len);
                else
                        src_virt = ioremap_remote(src_offset, src_window,
                                                  loop_len,
                                                  work->remote_dev, NULL);
                if (!src_virt)
                        return -ENOMEM;

                if (dst_window->type == SCIF_WINDOW_SELF)
                        dst_virt = _get_local_va(dst_offset, dst_window,
                                                 loop_len);
                else
                        dst_virt = ioremap_remote(dst_offset, dst_window,
                                                  loop_len,
                                                  work->remote_dev, NULL);
                if (!dst_virt) {
                        if (src_window->type != SCIF_WINDOW_SELF)
                                iounmap_remote(src_virt, loop_len, work);
                        return -ENOMEM;
                }

                if (src_window->type == SCIF_WINDOW_SELF)
                        scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len,
                                                work->ordered);
                else
                        scif_unaligned_cpy_fromio(dst_virt, src_virt,
                                                  loop_len, work->ordered);
                if (src_window->type != SCIF_WINDOW_SELF)
                        iounmap_remote(src_virt, loop_len, work);

                if (dst_window->type != SCIF_WINDOW_SELF)
                        iounmap_remote(dst_virt, loop_len, work);
                remaining_len -= loop_len;
        }
        return ret;
err:
        dev_err(scif_info.mdev.this_device,
                "%s %d Desc Prog Failed ret %d\n",
                __func__, __LINE__, ret);
        return ret;
}

/*
 * scif_rma_list_cpu_copy:
 *
 * Traverse all the windows and perform CPU copy.
 */
static int scif_rma_list_cpu_copy(struct scif_copy_work *work)
{
        void *src_virt, *dst_virt;
        size_t loop_len, remaining_len;
        int src_page_off, dst_page_off;
        s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
        struct scif_window *src_window = work->src_window;
        struct scif_window *dst_window = work->dst_window;
        s64 end_src_offset, end_dst_offset;
        int ret = 0;
        struct scif_window_iter src_win_iter;
        struct scif_window_iter dst_win_iter;

        remaining_len = work->len;

        scif_init_window_iter(src_window, &src_win_iter);
        scif_init_window_iter(dst_window, &dst_win_iter);
        while (remaining_len) {
                src_page_off = src_offset & ~PAGE_MASK;
                dst_page_off = dst_offset & ~PAGE_MASK;
                loop_len = min(PAGE_SIZE -
                               max(src_page_off, dst_page_off),
                               remaining_len);

                if (src_window->type == SCIF_WINDOW_SELF)
                        src_virt = _get_local_va(src_offset, src_window,
                                                 loop_len);
                else
                        src_virt = ioremap_remote(src_offset, src_window,
                                                  loop_len,
                                                  work->remote_dev,
                                                  &src_win_iter);
                if (!src_virt) {
                        ret = -ENOMEM;
                        goto error;
                }

                if (dst_window->type == SCIF_WINDOW_SELF)
                        dst_virt = _get_local_va(dst_offset, dst_window,
                                                 loop_len);
                else
                        dst_virt = ioremap_remote(dst_offset, dst_window,
                                                  loop_len,
                                                  work->remote_dev,
                                                  &dst_win_iter);
                if (!dst_virt) {
                        if (src_window->type == SCIF_WINDOW_PEER)
                                iounmap_remote(src_virt, loop_len, work);
                        ret = -ENOMEM;
                        goto error;
                }

                if (work->loopback) {
                        memcpy(dst_virt, src_virt, loop_len);
                } else {
                        if (src_window->type == SCIF_WINDOW_SELF)
                                memcpy_toio((void __iomem __force *)dst_virt,
                                            src_virt, loop_len);
                        else
                                memcpy_fromio(dst_virt,
                                              (void __iomem __force *)src_virt,
                                              loop_len);
                }
                if (src_window->type == SCIF_WINDOW_PEER)
                        iounmap_remote(src_virt, loop_len, work);

                if (dst_window->type == SCIF_WINDOW_PEER)
                        iounmap_remote(dst_virt, loop_len, work);

                src_offset += loop_len;
                dst_offset += loop_len;
                remaining_len -= loop_len;
                if (remaining_len) {
                        end_src_offset = src_window->offset +
                                (src_window->nr_pages << PAGE_SHIFT);
                        end_dst_offset = dst_window->offset +
                                (dst_window->nr_pages << PAGE_SHIFT);
                        if (src_offset == end_src_offset) {
                                src_window = list_next_entry(src_window, list);
                                scif_init_window_iter(src_window,
                                                      &src_win_iter);
                        }
                        if (dst_offset == end_dst_offset) {
                                dst_window = list_next_entry(dst_window, list);
                                scif_init_window_iter(dst_window,
                                                      &dst_win_iter);
                        }
                }
        }
error:
        return ret;
}

static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd,
                                          struct scif_copy_work *work,
                                          struct dma_chan *chan, off_t loffset)
{
        int src_cache_off, dst_cache_off;
        s64 src_offset = work->src_offset, dst_offset = work->dst_offset;
        u8 *temp = NULL;
        bool src_local = true;
        struct scif_dma_comp_cb *comp_cb;
        dma_addr_t src_dma_addr, dst_dma_addr;
        int err;

        if (is_dma_copy_aligned(chan->device, 1, 1, 1))
                return _scif_rma_list_dma_copy_aligned(work, chan);

        src_cache_off = src_offset & (L1_CACHE_BYTES - 1);
        dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1);

        if (dst_cache_off == src_cache_off)
                return scif_rma_list_dma_copy_aligned(work, chan);

        if (work->loopback)
                return scif_rma_list_cpu_copy(work);
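        /*
         * Source and destination are misaligned relative to each other,
         * so stage the transfer through a cache-line aligned temporary
         * buffer and let the DMA engine see an aligned copy on one side.
         */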
        src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset);
        dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset);
        src_local = work->src_window->type == SCIF_WINDOW_SELF;

        /* Allocate dma_completion cb */
        comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL);
        if (!comp_cb)
                goto error;

        work->comp_cb = comp_cb;
        comp_cb->cb_cookie = comp_cb;
        comp_cb->dma_completion_func = &scif_rma_completion_cb;

        if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) {
                comp_cb->is_cache = false;
                /* Allocate padding bytes to align to a cache line */
                temp = kmalloc(work->len + (L1_CACHE_BYTES << 1),
                               GFP_KERNEL);
                if (!temp)
                        goto free_comp_cb;
                comp_cb->temp_buf_to_free = temp;
                /* kmalloc(..) does not guarantee cache line alignment */
                if (!IS_ALIGNED((u64)temp, L1_CACHE_BYTES))
                        temp = PTR_ALIGN(temp, L1_CACHE_BYTES);
        } else {
                comp_cb->is_cache = true;
                temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL);
                if (!temp)
                        goto free_comp_cb;
                comp_cb->temp_buf_to_free = temp;
        }

        if (src_local) {
                temp += dst_cache_off;
                scif_rma_local_cpu_copy(work->src_offset, work->src_window,
                                        temp, work->len, true);
        } else {
                comp_cb->dst_window = work->dst_window;
                comp_cb->dst_offset = work->dst_offset;
                work->src_offset = work->src_offset - src_cache_off;
                comp_cb->len = work->len;
                work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES);
                comp_cb->header_padding = src_cache_off;
        }
        comp_cb->temp_buf = temp;

        err = scif_map_single(&comp_cb->temp_phys, temp,
                              work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE);
        if (err)
                goto free_temp_buf;
        comp_cb->sdev = work->remote_dev;
        if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0)
                goto free_temp_buf;
        if (!src_local)
                work->fence_type = SCIF_DMA_INTR;
        return 0;
free_temp_buf:
        if (comp_cb->is_cache)
                kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free);
        else
                kfree(comp_cb->temp_buf_to_free);
free_comp_cb:
        kfree(comp_cb);
error:
        return -ENOMEM;
}
1654 | ||
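/*
 * Editorial sketch (not driver code): the wrapper above over-allocates its
 * staging buffer by two cache lines so the working pointer can be bumped to
 * a cache-line boundary while the original pointer stays in
 * temp_buf_to_free for kfree(). The same pattern in isolation:
 *
 *	u8 *raw, *buf;
 *
 *	raw = kmalloc(len + (L1_CACHE_BYTES << 1), GFP_KERNEL);
 *	if (!raw)
 *		return -ENOMEM;
 *	buf = PTR_ALIGN(raw, L1_CACHE_BYTES);
 *	(stage the transfer through buf)
 *	kfree(raw);
 *
 * kfree() must be handed the pointer kmalloc() returned, never the aligned
 * alias; that is why the driver tracks temp_buf and temp_buf_to_free
 * separately.
 */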
1655 | /** | |
1656 | * scif_rma_copy: | |
1657 | * @epd: end point descriptor. | |
1658 | * @loffset: offset in local registered address space to/from which to copy | |
1659 | * @addr: user virtual address to/from which to copy | |
1660 | * @len: length of range to copy | |
1661 | * @roffset: offset in remote registered address space to/from which to copy | |
1662 | * @flags: transfer flags: SCIF_RMA_USECPU, SCIF_RMA_USECACHE, SCIF_RMA_SYNC and/or SCIF_RMA_ORDERED | |
1663 | * @dir: SCIF_LOCAL_TO_REMOTE or SCIF_REMOTE_TO_LOCAL | |
1664 | * @last_chunk: true if this is the last chunk of a larger transfer | |
1665 | * | |
1666 | * Validate parameters, check that the src/dst registered ranges requested | |
1667 | * for the copy are valid, and initiate either a CPU or a DMA copy. | |
1668 | */ | |
1669 | static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr, | |
1670 | size_t len, off_t roffset, int flags, | |
1671 | enum scif_rma_dir dir, bool last_chunk) | |
1672 | { | |
1673 | struct scif_endpt *ep = (struct scif_endpt *)epd; | |
1674 | struct scif_rma_req remote_req; | |
1675 | struct scif_rma_req req; | |
1676 | struct scif_window *local_window = NULL; | |
1677 | struct scif_window *remote_window = NULL; | |
1678 | struct scif_copy_work copy_work; | |
1679 | bool loopback; | |
1680 | int err = 0; | |
1681 | struct dma_chan *chan; | |
1682 | struct scif_mmu_notif *mmn = NULL; | |
1683 | bool cache = false; | |
1684 | struct device *spdev; | |
1685 | ||
1686 | err = scif_verify_epd(ep); | |
1687 | if (err) | |
1688 | return err; | |
1689 | ||
1690 | if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE | | |
1691 | SCIF_RMA_SYNC | SCIF_RMA_ORDERED))) | |
1692 | return -EINVAL; | |
1693 | ||
1694 | loopback = scifdev_self(ep->remote_dev); | |
1695 | copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ? | |
1696 | SCIF_DMA_POLL : 0; | |
1697 | copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk); | |
1698 | ||
1699 | /* Use CPU for Mgmt node <-> Mgmt node copies */ | |
1700 | if (loopback && scif_is_mgmt_node()) { | |
1701 | flags |= SCIF_RMA_USECPU; | |
1702 | copy_work.fence_type = 0x0; | |
1703 | } | |
1704 | ||
1705 | cache = scif_is_set_reg_cache(flags); | |
1706 | ||
1707 | remote_req.out_window = &remote_window; | |
1708 | remote_req.offset = roffset; | |
1709 | remote_req.nr_bytes = len; | |
1710 | /* | |
1711 | * If transfer is from local to remote then the remote window | |
1712 | * must be writable and vice versa. | |
1713 | */ | |
1714 | remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ; | |
1715 | remote_req.type = SCIF_WINDOW_PARTIAL; | |
1716 | remote_req.head = &ep->rma_info.remote_reg_list; | |
1717 | ||
1718 | spdev = scif_get_peer_dev(ep->remote_dev); | |
1719 | if (IS_ERR(spdev)) { | |
1720 | err = PTR_ERR(spdev); | |
1721 | return err; | |
1722 | } | |
1723 | ||
1724 | if (addr && cache) { | |
1725 | mutex_lock(&ep->rma_info.mmn_lock); | |
1726 | mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); | |
1727 | if (!mmn) | |
1728 | mmn = scif_add_mmu_notifier(current->mm, ep); | |
1729 | mutex_unlock(&ep->rma_info.mmn_lock); | |
1730 | if (IS_ERR(mmn)) { | |
1731 | scif_put_peer_dev(spdev); | |
1732 | return PTR_ERR(mmn); | |
1733 | } | |
1734 | cache = cache && !scif_rma_tc_can_cache(ep, len); | |
1735 | } | |
1736 | mutex_lock(&ep->rma_info.rma_lock); | |
1737 | if (addr) { | |
1738 | req.out_window = &local_window; | |
1739 | req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK), | |
1740 | PAGE_SIZE); | |
1741 | req.va_for_temp = addr & PAGE_MASK; | |
1742 | req.prot = (dir == SCIF_LOCAL_TO_REMOTE ? | |
1743 | VM_READ : VM_WRITE | VM_READ); | |
1744 | /* Does a valid local window exist? */ | |
1745 | if (mmn) { | |
1746 | spin_lock(&ep->rma_info.tc_lock); | |
1747 | req.head = &mmn->tc_reg_list; | |
1748 | err = scif_query_tcw(ep, &req); | |
1749 | spin_unlock(&ep->rma_info.tc_lock); | |
1750 | } | |
1751 | if (!mmn || err) { | |
1752 | err = scif_register_temp(epd, req.va_for_temp, | |
1753 | req.nr_bytes, req.prot, | |
1754 | &loffset, &local_window); | |
1755 | if (err) { | |
1756 | mutex_unlock(&ep->rma_info.rma_lock); | |
1757 | goto error; | |
1758 | } | |
1759 | if (!cache) | |
1760 | goto skip_cache; | |
1761 | atomic_inc(&ep->rma_info.tcw_refcount); | |
1762 | atomic_add_return(local_window->nr_pages, | |
1763 | &ep->rma_info.tcw_total_pages); | |
1764 | if (mmn) { | |
1765 | spin_lock(&ep->rma_info.tc_lock); | |
1766 | scif_insert_tcw(local_window, | |
1767 | &mmn->tc_reg_list); | |
1768 | spin_unlock(&ep->rma_info.tc_lock); | |
1769 | } | |
1770 | } | |
1771 | skip_cache: | |
1772 | loffset = local_window->offset + | |
1773 | (addr - local_window->va_for_temp); | |
1774 | } else { | |
1775 | req.out_window = &local_window; | |
1776 | req.offset = loffset; | |
1777 | /* | |
1778 | * If transfer is from local to remote then the self window | |
1779 | * must be readable and vice versa. | |
1780 | */ | |
1781 | req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE; | |
1782 | req.nr_bytes = len; | |
1783 | req.type = SCIF_WINDOW_PARTIAL; | |
1784 | req.head = &ep->rma_info.reg_list; | |
1785 | /* Does a valid local window exist? */ | |
1786 | err = scif_query_window(&req); | |
1787 | if (err) { | |
1788 | mutex_unlock(&ep->rma_info.rma_lock); | |
1789 | goto error; | |
1790 | } | |
1791 | } | |
1792 | ||
1793 | /* Does a valid remote window exist? */ | |
1794 | err = scif_query_window(&remote_req); | |
1795 | if (err) { | |
1796 | mutex_unlock(&ep->rma_info.rma_lock); | |
1797 | goto error; | |
1798 | } | |
1799 | ||
1800 | /* | |
1801 | * Prepare copy_work for submitting work to the DMA kernel thread | |
1802 | * or CPU copy routine. | |
1803 | */ | |
1804 | copy_work.len = len; | |
1805 | copy_work.loopback = loopback; | |
1806 | copy_work.remote_dev = ep->remote_dev; | |
1807 | if (dir == SCIF_LOCAL_TO_REMOTE) { | |
1808 | copy_work.src_offset = loffset; | |
1809 | copy_work.src_window = local_window; | |
1810 | copy_work.dst_offset = roffset; | |
1811 | copy_work.dst_window = remote_window; | |
1812 | } else { | |
1813 | copy_work.src_offset = roffset; | |
1814 | copy_work.src_window = remote_window; | |
1815 | copy_work.dst_offset = loffset; | |
1816 | copy_work.dst_window = local_window; | |
1817 | } | |
1818 | ||
1819 | if (flags & SCIF_RMA_USECPU) { | |
1820 | scif_rma_list_cpu_copy(©_work); | |
1821 | } else { | |
1822 | chan = ep->rma_info.dma_chan; | |
1823 | err = scif_rma_list_dma_copy_wrapper(epd, ©_work, | |
1824 | chan, loffset); | |
1825 | } | |
1826 | if (addr && !cache) | |
1827 | atomic_inc(&ep->rma_info.tw_refcount); | |
1828 | ||
1829 | mutex_unlock(&ep->rma_info.rma_lock); | |
1830 | ||
1831 | if (last_chunk) { | |
1832 | struct scif_dev *rdev = ep->remote_dev; | |
1833 | ||
1834 | if (copy_work.fence_type == SCIF_DMA_POLL) | |
1835 | err = scif_drain_dma_poll(rdev->sdev, | |
1836 | ep->rma_info.dma_chan); | |
1837 | else if (copy_work.fence_type == SCIF_DMA_INTR) | |
1838 | err = scif_drain_dma_intr(rdev->sdev, | |
1839 | ep->rma_info.dma_chan); | |
1840 | } | |
1841 | ||
1842 | if (addr && !cache) | |
1843 | scif_queue_for_cleanup(local_window, &scif_info.rma); | |
1844 | scif_put_peer_dev(spdev); | |
1845 | return err; | |
1846 | error: | |
1847 | if (err) { | |
1848 | if (addr && local_window && !cache) | |
1849 | scif_destroy_window(ep, local_window); | |
1850 | dev_err(scif_info.mdev.this_device, | |
1851 | "%s %d err %d len 0x%lx\n", | |
1852 | __func__, __LINE__, err, len); | |
1853 | } | |
1854 | scif_put_peer_dev(spdev); | |
1855 | return err; | |
1856 | } | |
1857 | ||
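/*
 * Editorial note, condensed from scif_rma_copy() and its helpers above:
 * the completion fence is chosen as follows.
 *
 *	SCIF_RMA_SYNC on the last chunk        -> SCIF_DMA_POLL (polled drain)
 *	unaligned DMA with a remote source     -> SCIF_DMA_INTR (interrupt drain)
 *	loopback on the management node        -> no fence; pure CPU copy
 */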
1858 | int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, | |
1859 | off_t roffset, int flags) | |
1860 | { | |
1861 | int err; | |
1862 | ||
1863 | dev_dbg(scif_info.mdev.this_device, | |
1864 | "SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n", | |
1865 | epd, loffset, len, roffset, flags); | |
1866 | if (scif_unaligned(loffset, roffset)) { | |
1867 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1868 | err = scif_rma_copy(epd, loffset, 0x0, | |
1869 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1870 | roffset, flags, | |
1871 | SCIF_REMOTE_TO_LOCAL, false); | |
1872 | if (err) | |
1873 | goto readfrom_err; | |
1874 | loffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1875 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1876 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1877 | } | |
1878 | } | |
1879 | err = scif_rma_copy(epd, loffset, 0x0, len, | |
1880 | roffset, flags, SCIF_REMOTE_TO_LOCAL, true); | |
1881 | readfrom_err: | |
1882 | return err; | |
1883 | } | |
1884 | EXPORT_SYMBOL_GPL(scif_readfrom); | |
1885 | ||
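/*
 * Usage sketch (editorial; epd, LOCAL_OFF, REMOTE_OFF and NBYTES are
 * hypothetical): with a connected endpoint and both ranges registered, a
 * caller can pull from the peer synchronously; scif_writeto() below is the
 * mirror image for pushes.
 *
 *	err = scif_readfrom(epd, LOCAL_OFF, NBYTES, REMOTE_OFF, SCIF_RMA_SYNC);
 *	if (err)
 *		pr_err("scif_readfrom failed: %d\n", err);
 *
 * When scif_unaligned(loffset, roffset) is true, the transfer is split into
 * SCIF_MAX_UNALIGNED_BUF_SIZE chunks and only the final chunk carries the
 * SYNC/ORDERED semantics.
 */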
1886 | int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, | |
1887 | off_t roffset, int flags) | |
1888 | { | |
1889 | int err; | |
1890 | ||
1891 | dev_dbg(scif_info.mdev.this_device, | |
1892 | "SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n", | |
1893 | epd, loffset, len, roffset, flags); | |
1894 | if (scif_unaligned(loffset, roffset)) { | |
1895 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1896 | err = scif_rma_copy(epd, loffset, 0x0, | |
1897 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1898 | roffset, flags, | |
1899 | SCIF_LOCAL_TO_REMOTE, false); | |
1900 | if (err) | |
1901 | goto writeto_err; | |
1902 | loffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1903 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1904 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1905 | } | |
1906 | } | |
1907 | err = scif_rma_copy(epd, loffset, 0x0, len, | |
1908 | roffset, flags, SCIF_LOCAL_TO_REMOTE, true); | |
1909 | writeto_err: | |
1910 | return err; | |
1911 | } | |
1912 | EXPORT_SYMBOL_GPL(scif_writeto); | |
1913 | ||
1914 | int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, | |
1915 | off_t roffset, int flags) | |
1916 | { | |
1917 | int err; | |
1918 | ||
1919 | dev_dbg(scif_info.mdev.this_device, | |
1920 | "SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", | |
1921 | epd, addr, len, roffset, flags); | |
1922 | if (scif_unaligned((off_t __force)addr, roffset)) { | |
1923 | if (len > SCIF_MAX_UNALIGNED_BUF_SIZE) | |
1924 | flags &= ~SCIF_RMA_USECACHE; | |
1925 | ||
1926 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1927 | err = scif_rma_copy(epd, 0, (u64)addr, | |
1928 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1929 | roffset, flags, | |
1930 | SCIF_REMOTE_TO_LOCAL, false); | |
1931 | if (err) | |
1932 | goto vreadfrom_err; | |
1933 | addr += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1934 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1935 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1936 | } | |
1937 | } | |
1938 | err = scif_rma_copy(epd, 0, (u64)addr, len, | |
1939 | roffset, flags, SCIF_REMOTE_TO_LOCAL, true); | |
1940 | vreadfrom_err: | |
1941 | return err; | |
1942 | } | |
1943 | EXPORT_SYMBOL_GPL(scif_vreadfrom); | |
1944 | ||
1945 | int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, | |
1946 | off_t roffset, int flags) | |
1947 | { | |
1948 | int err; | |
1949 | ||
1950 | dev_dbg(scif_info.mdev.this_device, | |
1951 | "SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", | |
1952 | epd, addr, len, roffset, flags); | |
1953 | if (scif_unaligned((off_t __force)addr, roffset)) { | |
1954 | if (len > SCIF_MAX_UNALIGNED_BUF_SIZE) | |
1955 | flags &= ~SCIF_RMA_USECACHE; | |
1956 | ||
1957 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { | |
1958 | err = scif_rma_copy(epd, 0, (u64)addr, | |
1959 | SCIF_MAX_UNALIGNED_BUF_SIZE, | |
1960 | roffset, flags, | |
1961 | SCIF_LOCAL_TO_REMOTE, false); | |
1962 | if (err) | |
1963 | goto vwriteto_err; | |
1964 | addr += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1965 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1966 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; | |
1967 | } | |
1968 | } | |
1969 | err = scif_rma_copy(epd, 0, (u64)addr, len, | |
1970 | roffset, flags, SCIF_LOCAL_TO_REMOTE, true); | |
1971 | vwriteto_err: | |
1972 | return err; | |
1973 | } | |
1974 | EXPORT_SYMBOL_GPL(scif_vwriteto); |
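/*
 * Usage sketch (editorial; buf, NBYTES and REMOTE_OFF are hypothetical):
 * the v* variants take a virtual address rather than a registered local
 * offset; scif_rma_copy() registers a temporary window on the fly and, with
 * SCIF_RMA_USECACHE, may cache it for reuse by later calls.
 *
 *	void *buf = kmalloc(NBYTES, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	err = scif_vwriteto(epd, buf, NBYTES, REMOTE_OFF,
 *			    SCIF_RMA_USECACHE | SCIF_RMA_SYNC);
 *
 * Note that SCIF_RMA_USECACHE is dropped above whenever an unaligned
 * transfer exceeds SCIF_MAX_UNALIGNED_BUF_SIZE.
 */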