misc: mic: scif: scif_fence: Fix a bunch of different documentation issues
drivers/misc/mic/scif/scif_fence.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * Intel SCIF driver.
 */

#include "scif_main.h"

/**
 * scif_recv_mark: Handle SCIF_MARK request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested a mark.
 */
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int mark = 0;
	int err;

	err = _scif_fence_mark(ep, &mark);
	if (err)
		msg->uop = SCIF_MARK_NACK;
	else
		msg->uop = SCIF_MARK_ACK;
	msg->payload[0] = ep->remote_ep;
	msg->payload[2] = mark;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_MARK message.
 */
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_MARK_ACK) {
		fence_req->state = OP_COMPLETED;
		fence_req->dma_mark = (int)msg->payload[2];
	} else {
		fence_req->state = OP_FAILED;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_wait: Handle SCIF_WAIT request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested waiting on a fence.
 */
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_remote_fence_info *fence;

	/*
	 * Allocate structure for remote fence information and
	 * send a NACK if the allocation failed. The peer will
	 * return ENOMEM upon receiving a NACK.
	 */
	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence) {
		msg->payload[0] = ep->remote_ep;
		msg->uop = SCIF_WAIT_NACK;
		scif_nodeqp_send(ep->remote_dev, msg);
		return;
	}

	/* Prepare the fence request */
	memcpy(&fence->msg, msg, sizeof(struct scifmsg));
	INIT_LIST_HEAD(&fence->list);

	/* Insert into the global remote fence request list */
	mutex_lock(&scif_info.fencelock);
	atomic_inc(&ep->rma_info.fence_refcount);
	list_add_tail(&fence->list, &scif_info.fence);
	mutex_unlock(&scif_info.fencelock);

	schedule_work(&scif_info.misc_work);
}

/**
 * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_WAIT message.
 */
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_WAIT_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a local offset.
 */
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_SELF);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_remote: Handle SCIF_SIG_REMOTE request
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a remote offset.
 */
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_PEER);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
 * @scifdev: SCIF device
 * @msg: Interrupt message
 *
 * The peer has responded to a signal request.
 */
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[3];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_SIG_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

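/**
 * scif_get_local_va: Get kernel virtual address of a registered offset
 * @off: registered offset
 * @window: window backed by pinned pages which contains @off
 *
 * Return the kernel virtual address at which the pinned page backing
 * the registered offset @off is mapped.
 */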
static inline void *scif_get_local_va(off_t off, struct scif_window *window)
{
	struct page **pages = window->pinned_pages->pages;
	int page_nr = (off - window->offset) >> PAGE_SHIFT;
	off_t page_off = off & ~PAGE_MASK;

	return page_address(pages[page_nr]) + page_off;
}

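/**
 * scif_prog_signal_cb: DMA callback run once a signal value is written
 * @arg: struct scif_cb_arg describing the signal status descriptor
 *
 * Return the status descriptor to the signal pool and free the callback
 * argument allocated in _scif_prog_signal().
 */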
static void scif_prog_signal_cb(void *arg)
{
	struct scif_cb_arg *cb_arg = arg;

	dma_pool_free(cb_arg->ep->remote_dev->signal_pool, cb_arg->status,
		      cb_arg->src_dma_addr);
	kfree(cb_arg);
}

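/**
 * _scif_prog_signal: Program a signal write on the endpoint's DMA channel
 * @epd: endpoint descriptor
 * @dst: DMA address to write @val to
 * @val: value to be written
 *
 * Queue a fenced DMA transfer so that @val is written to @dst only after
 * all prior DMA transfers on the channel have completed. X100 hardware
 * writes the value with an immediate data (status) descriptor; other
 * hardware copies it from a scif_status allocated out of the signal pool
 * and freed in scif_prog_signal_cb().
 */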
static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
	bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
	struct dma_async_tx_descriptor *tx;
	struct scif_status *status = NULL;
	struct scif_cb_arg *cb_arg = NULL;
	dma_addr_t src;
	dma_cookie_t cookie;
	int err;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	dma_async_issue_pending(chan);
	if (x100) {
		/*
		 * For X100 use the status descriptor to write the value to
		 * the destination.
		 */
		tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
	} else {
		status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
					&src);
		if (!status) {
			err = -ENOMEM;
			dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
				__func__, __LINE__, err);
			goto alloc_fail;
		}
		status->val = val;
		status->src_dma_addr = src;
		status->ep = ep;
		src += offsetof(struct scif_status, val);
		tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
						  DMA_PREP_INTERRUPT);
	}
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	if (!x100) {
		cb_arg = kmalloc(sizeof(*cb_arg), GFP_KERNEL);
		if (!cb_arg) {
			err = -ENOMEM;
			goto dma_fail;
		}
		cb_arg->src_dma_addr = src;
		cb_arg->status = status;
		cb_arg->ep = ep;
		tx->callback = scif_prog_signal_cb;
		tx->callback_param = cb_arg;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = -EIO;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	dma_async_issue_pending(chan);
	return 0;
dma_fail:
	if (!x100) {
		dma_pool_free(ep->remote_dev->signal_pool, status,
			      src - offsetof(struct scif_status, val));
		kfree(cb_arg);
	}
alloc_fail:
	return err;
}

/**
 * scif_prog_signal:
 * @epd: endpoint descriptor
 * @offset: registered address to write @val to
 * @val: value to be written at @offset
 * @type: type of the window, SCIF_WINDOW_SELF or SCIF_WINDOW_PEER
 *
 * Arrange to write a value to the registered offset after ensuring that
 * the offset provided is indeed valid.
 */
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
		     enum scif_window_type type)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_window *window = NULL;
	struct scif_rma_req req;
	dma_addr_t dst_dma_addr;
	int err;

	mutex_lock(&ep->rma_info.rma_lock);
	req.out_window = &window;
	req.offset = offset;
	req.nr_bytes = sizeof(u64);
	req.prot = SCIF_PROT_WRITE;
	req.type = SCIF_WINDOW_SINGLE;
	if (type == SCIF_WINDOW_SELF)
		req.head = &ep->rma_info.reg_list;
	else
		req.head = &ep->rma_info.remote_reg_list;
	/* Does a valid window exist? */
	err = scif_query_window(&req);
	if (err) {
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto unlock_ret;
	}

	if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
		u64 *dst_virt;

		if (type == SCIF_WINDOW_SELF)
			dst_virt = scif_get_local_va(offset, window);
		else
			dst_virt =
			scif_get_local_va(offset, (struct scif_window *)
					  window->peer_window);
		*dst_virt = val;
	} else {
		dst_dma_addr = __scif_off_to_dma_addr(window, offset);
		err = _scif_prog_signal(epd, dst_dma_addr, val);
	}
unlock_ret:
	mutex_unlock(&ep->rma_info.rma_lock);
	return err;
}

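/**
 * _scif_fence_wait: Wait for a local DMA mark to complete
 * @epd: endpoint descriptor
 * @mark: DMA mark (cookie) set up by _scif_fence_mark()
 *
 * Sleep until the DMA engine reports the cookie behind @mark as complete.
 *
 * Return: 0 on success, -ETIMEDOUT if SCIF_NODE_ALIVE_TIMEOUT expires or
 * -ERESTARTSYS if interrupted.
 */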
static int _scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
	int err;

	/* Wait for DMA callback in scif_fence_mark_cb(..) */
	err = wait_event_interruptible_timeout(ep->rma_info.markwq,
					       dma_async_is_tx_complete(
					       ep->rma_info.dma_chan,
					       cookie, NULL, NULL) ==
					       DMA_COMPLETE,
					       SCIF_NODE_ALIVE_TIMEOUT);
	if (!err)
		err = -ETIMEDOUT;
	else if (err > 0)
		err = 0;
	return err;
}

/**
 * scif_rma_handle_remote_fences:
 *
 * This routine services remote fence requests.
 */
void scif_rma_handle_remote_fences(void)
{
	struct list_head *item, *tmp;
	struct scif_remote_fence_info *fence;
	struct scif_endpt *ep;
	int mark, err;

	might_sleep();
	mutex_lock(&scif_info.fencelock);
	list_for_each_safe(item, tmp, &scif_info.fence) {
		fence = list_entry(item, struct scif_remote_fence_info,
				   list);
		/* Remove fence from global list */
		list_del(&fence->list);

		/* Initiate the fence operation */
		ep = (struct scif_endpt *)fence->msg.payload[0];
		mark = fence->msg.payload[2];
		err = _scif_fence_wait(ep, mark);
		if (err)
			fence->msg.uop = SCIF_WAIT_NACK;
		else
			fence->msg.uop = SCIF_WAIT_ACK;
		fence->msg.payload[0] = ep->remote_ep;
		scif_nodeqp_send(ep->remote_dev, &fence->msg);
		kfree(fence);
		if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
			schedule_work(&scif_info.misc_work);
	}
	mutex_unlock(&scif_info.fencelock);
}

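/**
 * _scif_send_fence: Send a SCIF_MARK or SCIF_WAIT request to the peer
 * @epd: endpoint descriptor
 * @uop: SCIF_MARK or SCIF_WAIT
 * @mark: DMA mark to wait for, used only by SCIF_WAIT
 * @out_mark: DMA mark reported by the peer, used only by SCIF_MARK
 *
 * Send the request over the node queue pair and sleep until the peer
 * responds with the corresponding (N)ACK message.
 */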
static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
{
	int err;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}

	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);

	msg.src = ep->port;
	msg.uop = uop;
	msg.payload[0] = ep->remote_ep;
	msg.payload[1] = (u64)fence_req;
	if (uop == SCIF_WAIT)
		msg.payload[2] = mark;
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED)
		err = scif_nodeqp_send(ep->remote_dev, &msg);
	else
		err = -ENOTCONN;
	spin_unlock(&ep->lock);
	if (err)
		goto error_free;
retry:
	/* Wait for a SCIF_MARK_(N)ACK or SCIF_WAIT_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	mutex_lock(&ep->rma_info.rma_lock);
	if (err < 0) {
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENOMEM;
	if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
		*out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
	mutex_unlock(&ep->rma_info.rma_lock);
error_free:
	kfree(fence_req);
error:
	return err;
}

/**
 * scif_send_fence_mark:
 * @epd: endpoint descriptor
 * @out_mark: output DMA mark reported by the peer
 *
 * Send a remote fence mark request.
 */
static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
{
	return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
}

/**
 * scif_send_fence_wait:
 * @epd: endpoint descriptor
 * @mark: DMA mark to wait for
 *
 * Send a remote fence wait request.
 */
static int scif_send_fence_wait(scif_epd_t epd, int mark)
{
	return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
}

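/**
 * _scif_send_fence_signal_wait: Wait for a SCIF_SIG_(N)ACK response
 * @ep: endpoint
 * @fence_req: fence request completed by scif_recv_sig_resp()
 *
 * Sleep until the peer acknowledges a signal request, retrying for as
 * long as the remote node stays alive.
 */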
static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
					struct scif_fence_info *fence_req)
{
	int err;

retry:
	/* Wait for a SCIF_SIG_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	if (err < 0) {
		mutex_lock(&ep->rma_info.rma_lock);
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
		mutex_unlock(&ep->rma_info.rma_lock);
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENXIO;
	return err;
}

/**
 * scif_send_fence_signal:
 * @epd: endpoint descriptor
 * @roff: remote offset
 * @rval: value to write to @roff
 * @loff: local offset
 * @lval: value to write to @loff
 * @flags: flags
 *
 * Sends a remote fence signal request.
 */
static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
				  off_t loff, u64 lval, int flags)
{
	int err = 0;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}

	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);
	msg.src = ep->port;
	if (flags & SCIF_SIGNAL_LOCAL) {
		msg.uop = SCIF_SIG_LOCAL;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = roff;
		msg.payload[2] = rval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
		if (err)
			goto error_free;
	}
	fence_req->state = OP_IN_PROGRESS;

	if (flags & SCIF_SIGNAL_REMOTE) {
		msg.uop = SCIF_SIG_REMOTE;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = loff;
		msg.payload[2] = lval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
	}
error_free:
	kfree(fence_req);
error:
	return err;
}

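/**
 * scif_fence_mark_cb: DMA callback for a local fence mark
 * @arg: the endpoint which set up the mark
 *
 * Wake up waiters in _scif_fence_wait() and drop the fence reference
 * taken in _scif_fence_mark().
 */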
static void scif_fence_mark_cb(void *arg)
{
	struct scif_endpt *ep = (struct scif_endpt *)arg;

	wake_up_interruptible(&ep->rma_info.markwq);
	atomic_dec(&ep->rma_info.fence_refcount);
}

/**
 * _scif_fence_mark:
 * @epd: endpoint descriptor
 * @mark: output DMA mark to set up
 *
 * Set up a mark for this endpoint and return the value of the mark.
 */
int _scif_fence_mark(scif_epd_t epd, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int err;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	dma_async_issue_pending(chan);
	tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	tx->callback = scif_fence_mark_cb;
	tx->callback_param = ep;
	*mark = cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	atomic_inc(&ep->rma_info.fence_refcount);
	dma_async_issue_pending(chan);
	return 0;
}

#define SCIF_LOOPB_MAGIC_MARK 0xdead

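/**
 * scif_fence_mark: Mark the current set of RMAs on an endpoint
 * @epd: endpoint descriptor
 * @flags: exactly one of SCIF_FENCE_INIT_SELF or SCIF_FENCE_INIT_PEER
 * @mark: output mark which can later be passed to scif_fence_wait()
 *
 * Set up a mark covering the RMAs initiated either by the local node
 * (SCIF_FENCE_INIT_SELF) or by the peer (SCIF_FENCE_INIT_PEER).
 */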
int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
		ep, flags, *mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/*
	 * Management node loopback does not need to use DMA.
	 * Return a valid mark to be symmetric.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		*mark = SCIF_LOOPB_MAGIC_MARK;
		return 0;
	}

	if (flags & SCIF_FENCE_INIT_SELF)
		err = _scif_fence_mark(epd, mark);
	else
		err = scif_send_fence_mark(ep, mark);

	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
		ep, flags, *mark, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_mark);

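/**
 * scif_fence_wait: Wait for RMAs covered by a mark to complete
 * @epd: endpoint descriptor
 * @mark: mark returned by an earlier scif_fence_mark() call
 *
 * Block until all RMAs covered by @mark have completed. Marks carrying
 * SCIF_REMOTE_FENCE are forwarded to the peer node.
 */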
int scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_wait: ep %p mark 0x%x\n",
		ep, mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;
	/*
	 * Management node loopback does not need to use DMA.
	 * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK
	 * so simply return success if the mark is valid.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		if (mark == SCIF_LOOPB_MAGIC_MARK)
			return 0;
		else
			return -EINVAL;
	}
	if (mark & SCIF_REMOTE_FENCE)
		err = scif_send_fence_wait(epd, mark);
	else
		err = _scif_fence_wait(epd, mark);
	if (err < 0)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_wait);

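/**
 * scif_fence_signal: Request a signal once outstanding RMAs complete
 * @epd: endpoint descriptor
 * @loff: local offset @lval is written to if SCIF_SIGNAL_LOCAL is set
 * @lval: value written to @loff
 * @roff: remote offset @rval is written to if SCIF_SIGNAL_REMOTE is set
 * @rval: value written to @roff
 * @flags: exactly one of SCIF_FENCE_INIT_SELF or SCIF_FENCE_INIT_PEER
 *	plus at least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE
 *
 * Arrange for @lval and/or @rval to be written to the Dword aligned
 * registered offsets once the RMAs initiated by the selected node have
 * completed.
 */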
int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
		      off_t roff, u64 rval, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
		ep, loff, lval, roff, rval, flags);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
			SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
	if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
		return -EINVAL;

	/* Only Dword aligned local offsets allowed */
	if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
		return -EINVAL;

	/* Only Dword aligned remote offsets allowed */
	if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
		return -EINVAL;

	if (flags & SCIF_FENCE_INIT_PEER) {
		err = scif_send_fence_signal(epd, roff, rval, loff,
					     lval, flags);
	} else {
		/* Local Signal in Local RAS */
		if (flags & SCIF_SIGNAL_LOCAL) {
			err = scif_prog_signal(epd, loff, lval,
					       SCIF_WINDOW_SELF);
			if (err)
				goto error_ret;
		}

		/* Signal in Remote RAS */
		if (flags & SCIF_SIGNAL_REMOTE)
			err = scif_prog_signal(epd, roff,
					       rval, SCIF_WINDOW_PEER);
	}
error_ret:
	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_signal);
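
/*
 * Minimal usage sketch (illustrative only, not compiled into this
 * driver): with a connected endpoint "epd" obtained via scif_open() and
 * scif_connect(), a caller can order locally initiated RMAs with a
 * mark/wait pair and then post a doorbell value to a hypothetical
 * registered remote offset "roff":
 *
 *	int mark, err;
 *
 *	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
 *	if (!err)
 *		err = scif_fence_wait(epd, mark);
 *	if (!err)
 *		err = scif_fence_signal(epd, 0, 0, roff, 1,
 *					SCIF_FENCE_INIT_SELF |
 *					SCIF_SIGNAL_REMOTE);
 */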