/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-move.c
 *
 * Data movement routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include "../../include/linux/lnet/lib-lnet.h"

static int local_nid_dist_zero = 1;
module_param(local_nid_dist_zero, int, 0444);
MODULE_PARM_DESC(local_nid_dist_zero, "Reserved");

int
lnet_fail_nid(lnet_nid_t nid, unsigned int threshold)
{
	lnet_test_peer_t *tp;
	lnet_test_peer_t *temp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	if (threshold) {
		/* Adding a new entry */
		LIBCFS_ALLOC(tp, sizeof(*tp));
		if (!tp)
			return -ENOMEM;

		tp->tp_nid = nid;
		tp->tp_threshold = threshold;

		lnet_net_lock(0);
		list_add_tail(&tp->tp_list, &the_lnet.ln_test_peers);
		lnet_net_unlock(0);
		return 0;
	}

	/* removing entries */
	INIT_LIST_HEAD(&cull);

	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (!tp->tp_threshold ||    /* needs culling anyway */
		    nid == LNET_NID_ANY ||  /* removing all entries */
		    tp->tp_nid == nid) {    /* matched this one */
			list_del(&tp->tp_list);
			list_add(&tp->tp_list, &cull);
		}
	}

	lnet_net_unlock(0);

	list_for_each_entry_safe(tp, temp, &cull, tp_list) {
		list_del(&tp->tp_list);
		LIBCFS_FREE(tp, sizeof(*tp));
	}
	return 0;
}

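/*
 * Illustrative use (a sketch, not called from this file): make the next
 * two messages to or from peer 'nid' fail, then remove the entry again.
 * A zero threshold culls matching entries, LNET_NID_ANY matches them all,
 * and LNET_MD_THRESH_INF fails messages indefinitely:
 *
 *	lnet_fail_nid(nid, 2);
 *	... exercise the path under test ...
 *	lnet_fail_nid(nid, 0);
 */
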
static int
fail_peer(lnet_nid_t nid, int outgoing)
{
	lnet_test_peer_t *tp;
	lnet_test_peer_t *temp;
	struct list_head *el;
	struct list_head *next;
	struct list_head cull;
	int fail = 0;

	INIT_LIST_HEAD(&cull);

	/* NB: use lnet_net_lock(0) to serialize operations on test peers */
	lnet_net_lock(0);

	list_for_each_safe(el, next, &the_lnet.ln_test_peers) {
		tp = list_entry(el, lnet_test_peer_t, tp_list);

		if (!tp->tp_threshold) {
			/* zombie entry */
			if (outgoing) {
				/*
				 * only cull zombies on outgoing tests,
				 * since we may be at interrupt priority on
				 * incoming messages.
				 */
				list_del(&tp->tp_list);
				list_add(&tp->tp_list, &cull);
			}
			continue;
		}

		if (tp->tp_nid == LNET_NID_ANY || /* fail every peer */
		    nid == tp->tp_nid) {	  /* fail this peer */
			fail = 1;

			if (tp->tp_threshold != LNET_MD_THRESH_INF) {
				tp->tp_threshold--;
				if (outgoing &&
				    !tp->tp_threshold) {
					/* see above */
					list_del(&tp->tp_list);
					list_add(&tp->tp_list, &cull);
				}
			}
			break;
		}
	}

	lnet_net_unlock(0);

	list_for_each_entry_safe(tp, temp, &cull, tp_list) {
		list_del(&tp->tp_list);

		LIBCFS_FREE(tp, sizeof(*tp));
	}

	return fail;
}

unsigned int
lnet_iov_nob(unsigned int niov, struct kvec *iov)
{
	unsigned int nob = 0;

	LASSERT(!niov || iov);
	while (niov-- > 0)
		nob += (iov++)->iov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_iov_nob);

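/*
 * Example (hypothetical buffers): for a 2-fragment kvec array
 * { {base0, 1024}, {base1, 512} }, lnet_iov_nob(2, iov) returns 1536,
 * i.e. the total number of bytes ("nob") spanned by the vector.
 */
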
void
lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov, unsigned int doffset,
		  unsigned int nsiov, struct kvec *siov, unsigned int soffset,
		  unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;

	if (!nob)
		return;

	/* skip complete frags before 'doffset' */
	LASSERT(ndiov > 0);
	while (doffset >= diov->iov_len) {
		doffset -= diov->iov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	/* skip complete frags before 'soffset' */
	LASSERT(nsiov > 0);
	while (soffset >= siov->iov_len) {
		soffset -= siov->iov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->iov_len - doffset,
			       siov->iov_len - soffset);
		this_nob = min(this_nob, nob);

		memcpy((char *)diov->iov_base + doffset,
		       (char *)siov->iov_base + soffset, this_nob);
		nob -= this_nob;

		if (diov->iov_len > doffset + this_nob) {
			doffset += this_nob;
		} else {
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->iov_len > soffset + this_nob) {
			soffset += this_nob;
		} else {
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);
}
EXPORT_SYMBOL(lnet_copy_iov2iov);

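/*
 * Worked example (hypothetical fragment sizes): copying nob = 100 bytes
 * from soffset = 3000 of a source vector of two 2048-byte kvecs first
 * skips the whole first fragment (soffset becomes 952), then copies in
 * pieces bounded by whichever of the source fragment, destination
 * fragment or remaining byte count runs out first.
 */
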
int
lnet_extract_iov(int dst_niov, struct kvec *dst,
		 int src_niov, struct kvec *src,
		 unsigned int offset, unsigned int len)
{
	/*
	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src'
	 */
	unsigned int frag_len;
	unsigned int niov;

	if (!len)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->iov_len) {      /* skip initial frags */
		offset -= src->iov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->iov_len - offset;
		dst->iov_base = ((char *)src->iov_base) + offset;

		if (len <= frag_len) {
			dst->iov_len = len;
			return niov;
		}

		dst->iov_len = frag_len;

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_iov);

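/*
 * Example (hypothetical layout): with src made of two 1024-byte fragments,
 * lnet_extract_iov(dst_niov, dst, 2, src, 512, 1024) fills dst with two
 * entries (the last 512 bytes of fragment 0 and the first 512 bytes of
 * fragment 1) and returns 2.  'src' itself is left untouched.
 */
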
unsigned int
lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
{
	unsigned int nob = 0;

	LASSERT(!niov || kiov);
	while (niov-- > 0)
		nob += (kiov++)->kiov_len;

	return nob;
}
EXPORT_SYMBOL(lnet_kiov_nob);

void
lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
		    unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
		    unsigned int nob)
{
	/* NB diov, siov are READ-ONLY */
	unsigned int this_nob;
	char *daddr = NULL;
	char *saddr = NULL;

	if (!nob)
		return;

	LASSERT(!in_interrupt());

	LASSERT(ndiov > 0);
	while (doffset >= diov->kiov_len) {
		doffset -= diov->kiov_len;
		diov++;
		ndiov--;
		LASSERT(ndiov > 0);
	}

	LASSERT(nsiov > 0);
	while (soffset >= siov->kiov_len) {
		soffset -= siov->kiov_len;
		siov++;
		nsiov--;
		LASSERT(nsiov > 0);
	}

	do {
		LASSERT(ndiov > 0);
		LASSERT(nsiov > 0);
		this_nob = min(diov->kiov_len - doffset,
			       siov->kiov_len - soffset);
		this_nob = min(this_nob, nob);

		if (!daddr)
			daddr = ((char *)kmap(diov->kiov_page)) +
				diov->kiov_offset + doffset;
		if (!saddr)
			saddr = ((char *)kmap(siov->kiov_page)) +
				siov->kiov_offset + soffset;

		/*
		 * Vanishing risk of kmap deadlock when mapping 2 pages.
		 * However in practice at least one of the kiovs will be mapped
		 * kernel pages and the map/unmap will be NOOPs
		 */
		memcpy(daddr, saddr, this_nob);
		nob -= this_nob;

		if (diov->kiov_len > doffset + this_nob) {
			daddr += this_nob;
			doffset += this_nob;
		} else {
			kunmap(diov->kiov_page);
			daddr = NULL;
			diov++;
			ndiov--;
			doffset = 0;
		}

		if (siov->kiov_len > soffset + this_nob) {
			saddr += this_nob;
			soffset += this_nob;
		} else {
			kunmap(siov->kiov_page);
			saddr = NULL;
			siov++;
			nsiov--;
			soffset = 0;
		}
	} while (nob > 0);

	if (daddr)
		kunmap(diov->kiov_page);
	if (saddr)
		kunmap(siov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);

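/*
 * Design note (a reading aid): the four lnet_copy_*2* variants all follow
 * the same shape - skip whole fragments up to the offset, then copy
 * min(dst frag, src frag, remaining nob) per iteration.  The kiov variants
 * additionally kmap()/kunmap() pages lazily, keeping at most one page per
 * kiov side mapped at a time, which is why they assert !in_interrupt().
 */
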
void
lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
		   unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int nob)
{
	/* NB iov, kiov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (!nob)
		return;

	LASSERT(!in_interrupt());

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	do {
		LASSERT(niov > 0);
		LASSERT(nkiov > 0);
		this_nob = min(iov->iov_len - iovoffset,
			       (__kernel_size_t)kiov->kiov_len - kiovoffset);
		this_nob = min(this_nob, nob);

		if (!addr)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
		nob -= this_nob;

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}
	} while (nob > 0);

	if (addr)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);

void
lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
		   unsigned int kiovoffset, unsigned int niov,
		   struct kvec *iov, unsigned int iovoffset,
		   unsigned int nob)
{
	/* NB kiov, iov are READ-ONLY */
	unsigned int this_nob;
	char *addr = NULL;

	if (!nob)
		return;

	LASSERT(!in_interrupt());

	LASSERT(nkiov > 0);
	while (kiovoffset >= kiov->kiov_len) {
		kiovoffset -= kiov->kiov_len;
		kiov++;
		nkiov--;
		LASSERT(nkiov > 0);
	}

	LASSERT(niov > 0);
	while (iovoffset >= iov->iov_len) {
		iovoffset -= iov->iov_len;
		iov++;
		niov--;
		LASSERT(niov > 0);
	}

	do {
		LASSERT(nkiov > 0);
		LASSERT(niov > 0);
		this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
			       iov->iov_len - iovoffset);
		this_nob = min(this_nob, nob);

		if (!addr)
			addr = ((char *)kmap(kiov->kiov_page)) +
				kiov->kiov_offset + kiovoffset;

		memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
		nob -= this_nob;

		if (kiov->kiov_len > kiovoffset + this_nob) {
			addr += this_nob;
			kiovoffset += this_nob;
		} else {
			kunmap(kiov->kiov_page);
			addr = NULL;
			kiov++;
			nkiov--;
			kiovoffset = 0;
		}

		if (iov->iov_len > iovoffset + this_nob) {
			iovoffset += this_nob;
		} else {
			iov++;
			niov--;
			iovoffset = 0;
		}
	} while (nob > 0);

	if (addr)
		kunmap(kiov->kiov_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);

int
lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
		  int src_niov, lnet_kiov_t *src,
		  unsigned int offset, unsigned int len)
{
	/*
	 * Initialise 'dst' to the subset of 'src' starting at 'offset',
	 * for exactly 'len' bytes, and return the number of entries.
	 * NB not destructive to 'src'
	 */
	unsigned int frag_len;
	unsigned int niov;

	if (!len)			/* no data => */
		return 0;		/* no frags */

	LASSERT(src_niov > 0);
	while (offset >= src->kiov_len) {      /* skip initial frags */
		offset -= src->kiov_len;
		src_niov--;
		src++;
		LASSERT(src_niov > 0);
	}

	niov = 1;
	for (;;) {
		LASSERT(src_niov > 0);
		LASSERT((int)niov <= dst_niov);

		frag_len = src->kiov_len - offset;
		dst->kiov_page = src->kiov_page;
		dst->kiov_offset = src->kiov_offset + offset;

		if (len <= frag_len) {
			dst->kiov_len = len;
			LASSERT(dst->kiov_offset + dst->kiov_len
				<= PAGE_SIZE);
			return niov;
		}

		dst->kiov_len = frag_len;
		LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);

		len -= frag_len;
		dst++;
		src++;
		niov++;
		src_niov--;
		offset = 0;
	}
}
EXPORT_SYMBOL(lnet_extract_kiov);

void
lnet_ni_recv(lnet_ni_t *ni, void *private, lnet_msg_t *msg, int delayed,
	     unsigned int offset, unsigned int mlen, unsigned int rlen)
{
	unsigned int niov = 0;
	struct kvec *iov = NULL;
	lnet_kiov_t *kiov = NULL;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(!mlen || msg);

	if (msg) {
		LASSERT(msg->msg_receiving);
		LASSERT(!msg->msg_sending);
		LASSERT(rlen == msg->msg_len);
		LASSERT(mlen <= msg->msg_len);
		LASSERT(msg->msg_offset == offset);
		LASSERT(msg->msg_wanted == mlen);

		msg->msg_receiving = 0;

		if (mlen) {
			niov = msg->msg_niov;
			iov = msg->msg_iov;
			kiov = msg->msg_kiov;

			LASSERT(niov > 0);
			LASSERT(!iov != !kiov);
		}
	}

	rc = ni->ni_lnd->lnd_recv(ni, private, msg, delayed,
				  niov, iov, kiov, offset, mlen, rlen);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

static void
lnet_setpayloadbuffer(lnet_msg_t *msg)
{
	lnet_libmd_t *md = msg->msg_md;

	LASSERT(msg->msg_len > 0);
	LASSERT(!msg->msg_routing);
	LASSERT(md);
	LASSERT(!msg->msg_niov);
	LASSERT(!msg->msg_iov);
	LASSERT(!msg->msg_kiov);

	msg->msg_niov = md->md_niov;
	if (md->md_options & LNET_MD_KIOV)
		msg->msg_kiov = md->md_iov.kiov;
	else
		msg->msg_iov = md->md_iov.iov;
}

void
lnet_prep_send(lnet_msg_t *msg, int type, lnet_process_id_t target,
	       unsigned int offset, unsigned int len)
{
	msg->msg_type = type;
	msg->msg_target = target;
	msg->msg_len = len;
	msg->msg_offset = offset;

	if (len)
		lnet_setpayloadbuffer(msg);

	memset(&msg->msg_hdr, 0, sizeof(msg->msg_hdr));
	msg->msg_hdr.type = cpu_to_le32(type);
	msg->msg_hdr.dest_nid = cpu_to_le64(target.nid);
	msg->msg_hdr.dest_pid = cpu_to_le32(target.pid);
	/* src_nid will be set later */
	msg->msg_hdr.src_pid = cpu_to_le32(the_lnet.ln_pid);
	msg->msg_hdr.payload_length = cpu_to_le32(len);
}

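/*
 * Note: the wire header is built in little-endian byte order here
 * (cpu_to_le32/cpu_to_le64) and converted back per-field on receive by
 * the lnet_parse_*() routines in this file, e.g. lnet_parse_get() does:
 *
 *	hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
 */
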
static void
lnet_ni_send(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *priv = msg->msg_private;
	int rc;

	LASSERT(!in_interrupt());
	LASSERT(LNET_NETTYP(LNET_NIDNET(ni->ni_nid)) == LOLND ||
		(msg->msg_txcredit && msg->msg_peertxcredit));

	rc = ni->ni_lnd->lnd_send(ni, priv, msg);
	if (rc < 0)
		lnet_finalize(ni, msg, rc);
}

static int
lnet_ni_eager_recv(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc;

	LASSERT(!msg->msg_sending);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_rx_ready_delay);
	LASSERT(ni->ni_lnd->lnd_eager_recv);

	msg->msg_rx_ready_delay = 1;
	rc = ni->ni_lnd->lnd_eager_recv(ni, msg->msg_private, msg,
					&msg->msg_private);
	if (rc) {
		CERROR("recv from %s / send to %s aborted: eager_recv failed %d\n",
		       libcfs_nid2str(msg->msg_rxpeer->lp_nid),
		       libcfs_id2str(msg->msg_target), rc);
		LASSERT(rc < 0); /* required by my callers */
	}

	return rc;
}

/* NB: caller shall hold a ref on 'lp' as I'd drop lnet_net_lock */
static void
lnet_ni_query_locked(lnet_ni_t *ni, lnet_peer_t *lp)
{
	unsigned long last_alive = 0;

	LASSERT(lnet_peer_aliveness_enabled(lp));
	LASSERT(ni->ni_lnd->lnd_query);

	lnet_net_unlock(lp->lp_cpt);
	ni->ni_lnd->lnd_query(ni, lp->lp_nid, &last_alive);
	lnet_net_lock(lp->lp_cpt);

	lp->lp_last_query = cfs_time_current();

	if (last_alive) /* NI has updated timestamp */
		lp->lp_last_alive = last_alive;
}

/* NB: always called with lnet_net_lock held */
static inline int
lnet_peer_is_alive(lnet_peer_t *lp, unsigned long now)
{
	int alive;
	unsigned long deadline;

	LASSERT(lnet_peer_aliveness_enabled(lp));

	/*
	 * Trust lnet_notify() if it has more recent aliveness news, but
	 * ignore the initial assumed death (see lnet_peers_start_down()).
	 */
	if (!lp->lp_alive && lp->lp_alive_count > 0 &&
	    cfs_time_aftereq(lp->lp_timestamp, lp->lp_last_alive))
		return 0;

	deadline = cfs_time_add(lp->lp_last_alive,
				cfs_time_seconds(lp->lp_ni->ni_peertimeout));
	alive = cfs_time_after(deadline, now);

	/*
	 * Update obsolete lp_alive except for routers assumed to be dead
	 * initially, because router checker would update aliveness in this
	 * case, and moreover lp_last_alive at peer creation is assumed.
	 */
	if (alive && !lp->lp_alive &&
	    !(lnet_isrouter(lp) && !lp->lp_alive_count))
		lnet_notify_locked(lp, 0, 1, lp->lp_last_alive);

	return alive;
}

/*
 * NB: returns 1 when alive, 0 when dead, negative when error;
 * may drop the lnet_net_lock
 */
static int
lnet_peer_alive_locked(lnet_peer_t *lp)
{
	unsigned long now = cfs_time_current();

	if (!lnet_peer_aliveness_enabled(lp))
		return -ENODEV;

	if (lnet_peer_is_alive(lp, now))
		return 1;

	/*
	 * Peer appears dead, but we should avoid frequent NI queries (at
	 * most once per lnet_queryinterval seconds).
	 */
	if (lp->lp_last_query) {
		static const int lnet_queryinterval = 1;

		unsigned long next_query =
			cfs_time_add(lp->lp_last_query,
				     cfs_time_seconds(lnet_queryinterval));

		if (time_before(now, next_query)) {
			if (lp->lp_alive)
				CWARN("Unexpected aliveness of peer %s: %d < %d (%d/%d)\n",
				      libcfs_nid2str(lp->lp_nid),
				      (int)now, (int)next_query,
				      lnet_queryinterval,
				      lp->lp_ni->ni_peertimeout);
			return 0;
		}
	}

	/* query NI for latest aliveness news */
	lnet_ni_query_locked(lp->lp_ni, lp);

	if (lnet_peer_is_alive(lp, now))
		return 1;

	lnet_notify_locked(lp, 0, 0, lp->lp_last_alive);
	return 0;
}

/**
 * \param msg The message to be sent.
 * \param do_send True if lnet_ni_send() should be called in this function.
 *	  lnet_send() is going to lnet_net_unlock immediately after this, so
 *	  it sets do_send FALSE and I don't do the unlock/send/lock bit.
 *
 * \retval LNET_CREDIT_OK If \a msg sent or OK to send.
 * \retval LNET_CREDIT_WAIT If \a msg blocked for credit.
 * \retval -EHOSTUNREACH If the next hop of the message appears dead.
 * \retval -ECANCELED If the MD of the message has been unlinked.
 */
static int
lnet_post_send_locked(lnet_msg_t *msg, int do_send)
{
	lnet_peer_t *lp = msg->msg_txpeer;
	lnet_ni_t *ni = lp->lp_ni;
	int cpt = msg->msg_tx_cpt;
	struct lnet_tx_queue *tq = ni->ni_tx_queues[cpt];

	/* non-lnet_send() callers have checked before */
	LASSERT(!do_send || msg->msg_tx_delayed);
	LASSERT(!msg->msg_receiving);
	LASSERT(msg->msg_tx_committed);

	/* NB 'lp' is always the next hop */
	if (!(msg->msg_target.pid & LNET_PID_USERFLAG) &&
	    !lnet_peer_alive_locked(lp)) {
		the_lnet.ln_counters[cpt]->drop_count++;
		the_lnet.ln_counters[cpt]->drop_length += msg->msg_len;
		lnet_net_unlock(cpt);

		CNETERR("Dropping message for %s: peer not alive\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -EHOSTUNREACH);

		lnet_net_lock(cpt);
		return -EHOSTUNREACH;
	}

	if (msg->msg_md &&
	    (msg->msg_md->md_flags & LNET_MD_FLAG_ABORTED)) {
		lnet_net_unlock(cpt);

		CNETERR("Aborting message for %s: LNetM[DE]Unlink() already called on the MD/ME.\n",
			libcfs_id2str(msg->msg_target));
		if (do_send)
			lnet_finalize(ni, msg, -ECANCELED);

		lnet_net_lock(cpt);
		return -ECANCELED;
	}

	if (!msg->msg_peertxcredit) {
		LASSERT((lp->lp_txcredits < 0) ==
			!list_empty(&lp->lp_txq));

		msg->msg_peertxcredit = 1;
		lp->lp_txqnob += msg->msg_len + sizeof(lnet_hdr_t);
		lp->lp_txcredits--;

		if (lp->lp_txcredits < lp->lp_mintxcredits)
			lp->lp_mintxcredits = lp->lp_txcredits;

		if (lp->lp_txcredits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_txq);
			return LNET_CREDIT_WAIT;
		}
	}

	if (!msg->msg_txcredit) {
		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		msg->msg_txcredit = 1;
		tq->tq_credits--;

		if (tq->tq_credits < tq->tq_credits_min)
			tq->tq_credits_min = tq->tq_credits;

		if (tq->tq_credits < 0) {
			msg->msg_tx_delayed = 1;
			list_add_tail(&msg->msg_list, &tq->tq_delayed);
			return LNET_CREDIT_WAIT;
		}
	}

	if (do_send) {
		lnet_net_unlock(cpt);
		lnet_ni_send(ni, msg);
		lnet_net_lock(cpt);
	}
	return LNET_CREDIT_OK;
}

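/*
 * Summary of the credit gates above (a reading aid, not new behaviour):
 * a message must take one peer tx credit (lp_txcredits) and one NI tx
 * credit (tq_credits) before it may be sent.  Either counter going
 * negative queues the message (on lp_txq or tq_delayed) and returns
 * LNET_CREDIT_WAIT; lnet_return_tx_credits_locked() later hands the
 * credits back and resubmits the first queued message.
 */
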
static lnet_rtrbufpool_t *
lnet_msg2bufpool(lnet_msg_t *msg)
{
	lnet_rtrbufpool_t *rbp;
	int cpt;

	LASSERT(msg->msg_rx_committed);

	cpt = msg->msg_rx_cpt;
	rbp = &the_lnet.ln_rtrpools[cpt][0];

	LASSERT(msg->msg_len <= LNET_MTU);
	while (msg->msg_len > (unsigned int)rbp->rbp_npages * PAGE_SIZE) {
		rbp++;
		LASSERT(rbp < &the_lnet.ln_rtrpools[cpt][LNET_NRBPOOLS]);
	}

	return rbp;
}

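/*
 * Reading aid: the router buffer pools for a CPT are ordered by increasing
 * buffer size, so the loop above returns the first pool whose buffers
 * (rbp_npages * PAGE_SIZE bytes) are large enough for msg_len; the final
 * LASSERT guarantees some pool can hold any message up to LNET_MTU.
 */
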
static int
lnet_post_routed_recv_locked(lnet_msg_t *msg, int do_recv)
{
	/*
	 * lnet_parse is going to lnet_net_unlock immediately after this, so it
	 * sets do_recv FALSE and I don't do the unlock/send/lock bit.
	 * I return LNET_CREDIT_WAIT if msg blocked and LNET_CREDIT_OK if
	 * received or OK to receive
	 */
	lnet_peer_t *lp = msg->msg_rxpeer;
	lnet_rtrbufpool_t *rbp;
	lnet_rtrbuf_t *rb;

	LASSERT(!msg->msg_iov);
	LASSERT(!msg->msg_kiov);
	LASSERT(!msg->msg_niov);
	LASSERT(msg->msg_routing);
	LASSERT(msg->msg_receiving);
	LASSERT(!msg->msg_sending);

	/* non-lnet_parse callers only receive delayed messages */
	LASSERT(!do_recv || msg->msg_rx_delayed);

	if (!msg->msg_peerrtrcredit) {
		LASSERT((lp->lp_rtrcredits < 0) ==
			!list_empty(&lp->lp_rtrq));

		msg->msg_peerrtrcredit = 1;
		lp->lp_rtrcredits--;
		if (lp->lp_rtrcredits < lp->lp_minrtrcredits)
			lp->lp_minrtrcredits = lp->lp_rtrcredits;

		if (lp->lp_rtrcredits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &lp->lp_rtrq);
			return LNET_CREDIT_WAIT;
		}
	}

	rbp = lnet_msg2bufpool(msg);

	if (!msg->msg_rtrcredit) {
		msg->msg_rtrcredit = 1;
		rbp->rbp_credits--;
		if (rbp->rbp_credits < rbp->rbp_mincredits)
			rbp->rbp_mincredits = rbp->rbp_credits;

		if (rbp->rbp_credits < 0) {
			/* must have checked eager_recv before here */
			LASSERT(msg->msg_rx_ready_delay);
			msg->msg_rx_delayed = 1;
			list_add_tail(&msg->msg_list, &rbp->rbp_msgs);
			return LNET_CREDIT_WAIT;
		}
	}

	LASSERT(!list_empty(&rbp->rbp_bufs));
	rb = list_entry(rbp->rbp_bufs.next, lnet_rtrbuf_t, rb_list);
	list_del(&rb->rb_list);

	msg->msg_niov = rbp->rbp_npages;
	msg->msg_kiov = &rb->rb_kiov[0];

	if (do_recv) {
		int cpt = msg->msg_rx_cpt;

		lnet_net_unlock(cpt);
		lnet_ni_recv(lp->lp_ni, msg->msg_private, msg, 1,
			     0, msg->msg_len, msg->msg_len);
		lnet_net_lock(cpt);
	}
	return LNET_CREDIT_OK;
}

void
lnet_return_tx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *txpeer = msg->msg_txpeer;
	lnet_msg_t *msg2;

	if (msg->msg_txcredit) {
		struct lnet_ni *ni = txpeer->lp_ni;
		struct lnet_tx_queue *tq = ni->ni_tx_queues[msg->msg_tx_cpt];

		/* give back NI txcredits */
		msg->msg_txcredit = 0;

		LASSERT((tq->tq_credits < 0) ==
			!list_empty(&tq->tq_delayed));

		tq->tq_credits++;
		if (tq->tq_credits <= 0) {
			msg2 = list_entry(tq->tq_delayed.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer->lp_ni == ni);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (msg->msg_peertxcredit) {
		/* give back peer txcredits */
		msg->msg_peertxcredit = 0;

		LASSERT((txpeer->lp_txcredits < 0) ==
			!list_empty(&txpeer->lp_txq));

		txpeer->lp_txqnob -= msg->msg_len + sizeof(lnet_hdr_t);
		LASSERT(txpeer->lp_txqnob >= 0);

		txpeer->lp_txcredits++;
		if (txpeer->lp_txcredits <= 0) {
			msg2 = list_entry(txpeer->lp_txq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			LASSERT(msg2->msg_txpeer == txpeer);
			LASSERT(msg2->msg_tx_delayed);

			(void) lnet_post_send_locked(msg2, 1);
		}
	}

	if (txpeer) {
		msg->msg_txpeer = NULL;
		lnet_peer_decref_locked(txpeer);
	}
}

void
lnet_schedule_blocked_locked(lnet_rtrbufpool_t *rbp)
{
	lnet_msg_t *msg;

	if (list_empty(&rbp->rbp_msgs))
		return;
	msg = list_entry(rbp->rbp_msgs.next,
			 lnet_msg_t, msg_list);
	list_del(&msg->msg_list);

	(void)lnet_post_routed_recv_locked(msg, 1);
}

void
lnet_drop_routed_msgs_locked(struct list_head *list, int cpt)
{
	struct list_head drop;
	lnet_msg_t *msg;
	lnet_msg_t *tmp;

	INIT_LIST_HEAD(&drop);

	list_splice_init(list, &drop);

	lnet_net_unlock(cpt);

	list_for_each_entry_safe(msg, tmp, &drop, msg_list) {
		lnet_ni_recv(msg->msg_rxpeer->lp_ni, msg->msg_private, NULL,
			     0, 0, 0, msg->msg_hdr.payload_length);
		list_del_init(&msg->msg_list);
		lnet_finalize(NULL, msg, -ECANCELED);
	}

	lnet_net_lock(cpt);
}

void
lnet_return_rx_credits_locked(lnet_msg_t *msg)
{
	lnet_peer_t *rxpeer = msg->msg_rxpeer;
	lnet_msg_t *msg2;

	if (msg->msg_rtrcredit) {
		/* give back global router credits */
		lnet_rtrbuf_t *rb;
		lnet_rtrbufpool_t *rbp;

		/*
		 * NB If a msg ever blocks for a buffer in rbp_msgs, it stays
		 * there until it gets one allocated, or aborts the wait
		 * itself
		 */
		LASSERT(msg->msg_kiov);

		rb = list_entry(msg->msg_kiov, lnet_rtrbuf_t, rb_kiov[0]);
		rbp = rb->rb_pool;

		msg->msg_kiov = NULL;
		msg->msg_rtrcredit = 0;

		LASSERT(rbp == lnet_msg2bufpool(msg));

		LASSERT((rbp->rbp_credits > 0) ==
			!list_empty(&rbp->rbp_bufs));

		/*
		 * If routing is now turned off, we just drop this buffer and
		 * don't bother trying to return credits.
		 */
		if (!the_lnet.ln_routing) {
			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
			goto routing_off;
		}

		/*
		 * It is possible that a user has lowered the desired number of
		 * buffers in this pool.  Make sure we never put back
		 * more buffers than the stated number.
		 */
		if (unlikely(rbp->rbp_credits >= rbp->rbp_req_nbuffers)) {
			/* Discard this buffer so we don't have too many. */
			lnet_destroy_rtrbuf(rb, rbp->rbp_npages);
			rbp->rbp_nbuffers--;
		} else {
			list_add(&rb->rb_list, &rbp->rbp_bufs);
			rbp->rbp_credits++;
			if (rbp->rbp_credits <= 0)
				lnet_schedule_blocked_locked(rbp);
		}
	}

routing_off:
	if (msg->msg_peerrtrcredit) {
		/* give back peer router credits */
		msg->msg_peerrtrcredit = 0;

		LASSERT((rxpeer->lp_rtrcredits < 0) ==
			!list_empty(&rxpeer->lp_rtrq));

		rxpeer->lp_rtrcredits++;
		/*
		 * drop all messages which are queued to be routed on that
		 * peer.
		 */
		if (!the_lnet.ln_routing) {
			lnet_drop_routed_msgs_locked(&rxpeer->lp_rtrq,
						     msg->msg_rx_cpt);
		} else if (rxpeer->lp_rtrcredits <= 0) {
			msg2 = list_entry(rxpeer->lp_rtrq.next,
					  lnet_msg_t, msg_list);
			list_del(&msg2->msg_list);

			(void) lnet_post_routed_recv_locked(msg2, 1);
		}
	}
	if (rxpeer) {
		msg->msg_rxpeer = NULL;
		lnet_peer_decref_locked(rxpeer);
	}
}

static int
lnet_compare_routes(lnet_route_t *r1, lnet_route_t *r2)
{
	lnet_peer_t *p1 = r1->lr_gateway;
	lnet_peer_t *p2 = r2->lr_gateway;
	int r1_hops = (r1->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r1->lr_hops;
	int r2_hops = (r2->lr_hops == LNET_UNDEFINED_HOPS) ? 1 : r2->lr_hops;

	if (r1->lr_priority < r2->lr_priority)
		return 1;

	if (r1->lr_priority > r2->lr_priority)
		return -ERANGE;

	if (r1_hops < r2_hops)
		return 1;

	if (r1_hops > r2_hops)
		return -ERANGE;

	if (p1->lp_txqnob < p2->lp_txqnob)
		return 1;

	if (p1->lp_txqnob > p2->lp_txqnob)
		return -ERANGE;

	if (p1->lp_txcredits > p2->lp_txcredits)
		return 1;

	if (p1->lp_txcredits < p2->lp_txcredits)
		return -ERANGE;

	if (r1->lr_seq - r2->lr_seq <= 0)
		return 1;

	return -ERANGE;
}

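/*
 * Route preference, in decreasing order of significance: lower priority
 * value, fewer hops (undefined hop counts compare as 1), smaller queued
 * byte backlog (lp_txqnob), more tx credits, and finally the older
 * sequence number as a round-robin tie-break.  A return of 1 means r1 is
 * at least as good as r2; -ERANGE means r2 is better.
 */
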
static lnet_peer_t *
lnet_find_route_locked(lnet_ni_t *ni, lnet_nid_t target, lnet_nid_t rtr_nid)
{
	lnet_remotenet_t *rnet;
	lnet_route_t *route;
	lnet_route_t *best_route;
	lnet_route_t *last_route;
	struct lnet_peer *lp_best;
	struct lnet_peer *lp;
	int rc;

	/*
	 * If @rtr_nid is not LNET_NID_ANY, return the gateway with
	 * rtr_nid nid, otherwise find the best gateway I can use
	 */
	rnet = lnet_find_net_locked(LNET_NIDNET(target));
	if (!rnet)
		return NULL;

	lp_best = NULL;
	best_route = NULL;
	last_route = NULL;
	list_for_each_entry(route, &rnet->lrn_routes, lr_list) {
		lp = route->lr_gateway;

		if (!lnet_is_route_alive(route))
			continue;

		if (ni && lp->lp_ni != ni)
			continue;

		if (lp->lp_nid == rtr_nid) /* it's pre-determined router */
			return lp;

		if (!lp_best) {
			best_route = route;
			last_route = route;
			lp_best = lp;
			continue;
		}

		/* no protection on below fields, but it's harmless */
		if (last_route->lr_seq - route->lr_seq < 0)
			last_route = route;

		rc = lnet_compare_routes(route, best_route);
		if (rc < 0)
			continue;

		best_route = route;
		lp_best = lp;
	}

	/*
	 * set sequence number on the best router to the latest sequence + 1
	 * so we can round-robin all routers; it's racy and inaccurate but
	 * harmless and functional
	 */
	if (best_route)
		best_route->lr_seq = last_route->lr_seq + 1;
	return lp_best;
}

int
lnet_send(lnet_nid_t src_nid, lnet_msg_t *msg, lnet_nid_t rtr_nid)
{
	lnet_nid_t dst_nid = msg->msg_target.nid;
	struct lnet_ni *src_ni;
	struct lnet_ni *local_ni;
	struct lnet_peer *lp;
	int cpt;
	int cpt2;
	int rc;

	/*
	 * NB: rtr_nid is set to LNET_NID_ANY for all current use-cases,
	 * but we might want to use pre-determined router for ACK/REPLY
	 * in the future
	 */
	/* NB: ni == interface pre-determined (ACK/REPLY) */
	LASSERT(!msg->msg_txpeer);
	LASSERT(!msg->msg_sending);
	LASSERT(!msg->msg_target_is_router);
	LASSERT(!msg->msg_receiving);

	msg->msg_sending = 1;

	LASSERT(!msg->msg_tx_committed);
	cpt = lnet_cpt_of_nid(rtr_nid == LNET_NID_ANY ? dst_nid : rtr_nid);
 again:
	lnet_net_lock(cpt);

	if (the_lnet.ln_shutdown) {
		lnet_net_unlock(cpt);
		return -ESHUTDOWN;
	}

	if (src_nid == LNET_NID_ANY) {
		src_ni = NULL;
	} else {
		src_ni = lnet_nid2ni_locked(src_nid, cpt);
		if (!src_ni) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Can't send to %s: src %s is not a local nid\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}
		LASSERT(!msg->msg_routing);
	}

	/* Is this for someone on a local network? */
	local_ni = lnet_net2ni_locked(LNET_NIDNET(dst_nid), cpt);

	if (local_ni) {
		if (!src_ni) {
			src_ni = local_ni;
			src_nid = src_ni->ni_nid;
		} else if (src_ni == local_ni) {
			lnet_ni_decref_locked(local_ni, cpt);
		} else {
			lnet_ni_decref_locked(local_ni, cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("No route to %s via from %s\n",
				      libcfs_nid2str(dst_nid),
				      libcfs_nid2str(src_nid));
			return -EINVAL;
		}

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing)
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);

		if (src_ni == the_lnet.ln_loni) {
			/* No send credit hassles with LOLND */
			lnet_net_unlock(cpt);
			lnet_ni_send(src_ni, msg);

			lnet_net_lock(cpt);
			lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);
			return 0;
		}

		rc = lnet_nid2peer_locked(&lp, dst_nid, cpt);
		/* lp has ref on src_ni; lose mine */
		lnet_ni_decref_locked(src_ni, cpt);
		if (rc) {
			lnet_net_unlock(cpt);
			LCONSOLE_WARN("Error %d finding peer %s\n", rc,
				      libcfs_nid2str(dst_nid));
			/* ENOMEM or shutting down */
			return rc;
		}
		LASSERT(lp->lp_ni == src_ni);
	} else {
		/* sending to a remote network */
		lp = lnet_find_route_locked(src_ni, dst_nid, rtr_nid);
		if (!lp) {
			if (src_ni)
				lnet_ni_decref_locked(src_ni, cpt);
			lnet_net_unlock(cpt);

			LCONSOLE_WARN("No route to %s via %s (all routers down)\n",
				      libcfs_id2str(msg->msg_target),
				      libcfs_nid2str(src_nid));
			return -EHOSTUNREACH;
		}

		/*
		 * rtr_nid is LNET_NID_ANY or NID of pre-determined router,
		 * it's possible that rtr_nid isn't LNET_NID_ANY and lp isn't
		 * pre-determined router; this can happen if the router table
		 * was changed while we released the lock
		 */
		if (rtr_nid != lp->lp_nid) {
			cpt2 = lnet_cpt_of_nid_locked(lp->lp_nid);
			if (cpt2 != cpt) {
				if (src_ni)
					lnet_ni_decref_locked(src_ni, cpt);
				lnet_net_unlock(cpt);

				rtr_nid = lp->lp_nid;
				cpt = cpt2;
				goto again;
			}
		}

		CDEBUG(D_NET, "Best route to %s via %s for %s %d\n",
		       libcfs_nid2str(dst_nid), libcfs_nid2str(lp->lp_nid),
		       lnet_msgtyp2str(msg->msg_type), msg->msg_len);

		if (!src_ni) {
			src_ni = lp->lp_ni;
			src_nid = src_ni->ni_nid;
		} else {
			LASSERT(src_ni == lp->lp_ni);
			lnet_ni_decref_locked(src_ni, cpt);
		}

		lnet_peer_addref_locked(lp);

		LASSERT(src_nid != LNET_NID_ANY);
		lnet_msg_commit(msg, cpt);

		if (!msg->msg_routing) {
			/* I'm the source and now I know which NI to send on */
			msg->msg_hdr.src_nid = cpu_to_le64(src_nid);
		}

		msg->msg_target_is_router = 1;
		msg->msg_target.nid = lp->lp_nid;
		msg->msg_target.pid = LNET_PID_LUSTRE;
	}

	/* 'lp' is our best choice of peer */

	LASSERT(!msg->msg_peertxcredit);
	LASSERT(!msg->msg_txcredit);
	LASSERT(!msg->msg_txpeer);

	msg->msg_txpeer = lp; /* msg takes my ref on lp */

	rc = lnet_post_send_locked(msg, 0);
	lnet_net_unlock(cpt);

	if (rc < 0)
		return rc;

	if (rc == LNET_CREDIT_OK)
		lnet_ni_send(src_ni, msg);

	return 0; /* rc == LNET_CREDIT_OK or LNET_CREDIT_WAIT */
}

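/*
 * Caller's view (sketch): lnet_send() returns 0 once the message has been
 * handed to the LND or queued for credits, and a negative errno
 * (-ESHUTDOWN, -EINVAL, -EHOSTUNREACH, -ECANCELED, ...) on an up-front
 * failure; completion of an accepted message is always reported later via
 * lnet_finalize() and the MD's event queue.
 */
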
void
lnet_drop_message(lnet_ni_t *ni, int cpt, void *private, unsigned int nob)
{
	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += nob;
	lnet_net_unlock(cpt);

	lnet_ni_recv(ni, private, NULL, 0, 0, 0, nob);
}

static void
lnet_recv_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;

	if (msg->msg_wanted)
		lnet_setpayloadbuffer(msg);

	lnet_build_msg_event(msg, LNET_EVENT_PUT);

	/*
	 * Must I ACK?  If so I'll grab the ack_wmd out of the header and put
	 * it back into the ACK during lnet_finalize()
	 */
	msg->msg_ack = !lnet_is_wire_handle_none(&hdr->msg.put.ack_wmd) &&
		       !(msg->msg_md->md_options & LNET_MD_ACK_DISABLE);

	lnet_ni_recv(ni, msg->msg_private, msg, msg->msg_rx_delayed,
		     msg->msg_offset, msg->msg_wanted, hdr->payload_length);
}

static int
lnet_parse_put(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	struct lnet_match_info info;
	bool ready_delay;
	int rc;

	/* Convert put fields to host byte order */
	hdr->msg.put.match_bits = le64_to_cpu(hdr->msg.put.match_bits);
	hdr->msg.put.ptl_index = le32_to_cpu(hdr->msg.put.ptl_index);
	hdr->msg.put.offset = le32_to_cpu(hdr->msg.put.offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_PUT;
	info.mi_portal = hdr->msg.put.ptl_index;
	info.mi_rlength = hdr->payload_length;
	info.mi_roffset = hdr->msg.put.offset;
	info.mi_mbits = hdr->msg.put.match_bits;

	msg->msg_rx_ready_delay = !ni->ni_lnd->lnd_eager_recv;
	ready_delay = msg->msg_rx_ready_delay;

 again:
	rc = lnet_ptl_match_md(&info, msg);
	switch (rc) {
	default:
		LBUG();

	case LNET_MATCHMD_OK:
		lnet_recv_put(ni, msg);
		return 0;

	case LNET_MATCHMD_NONE:
		/*
		 * no eager_recv or has already called it, should
		 * have been attached on delayed list
		 */
		if (ready_delay)
			return 0;

		rc = lnet_ni_eager_recv(ni, msg);
		if (!rc) {
			ready_delay = true;
			goto again;
		}
		/* fall through */

	case LNET_MATCHMD_DROP:
		CNETERR("Dropping PUT from %s portal %d match %llu offset %d length %d: %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength, rc);

		return -ENOENT; /* -ve: OK but no match */
	}
}

static int
lnet_parse_get(lnet_ni_t *ni, lnet_msg_t *msg, int rdma_get)
{
	struct lnet_match_info info;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_handle_wire_t reply_wmd;
	int rc;

	/* Convert get fields to host byte order */
	hdr->msg.get.match_bits = le64_to_cpu(hdr->msg.get.match_bits);
	hdr->msg.get.ptl_index = le32_to_cpu(hdr->msg.get.ptl_index);
	hdr->msg.get.sink_length = le32_to_cpu(hdr->msg.get.sink_length);
	hdr->msg.get.src_offset = le32_to_cpu(hdr->msg.get.src_offset);

	info.mi_id.nid = hdr->src_nid;
	info.mi_id.pid = hdr->src_pid;
	info.mi_opc = LNET_MD_OP_GET;
	info.mi_portal = hdr->msg.get.ptl_index;
	info.mi_rlength = hdr->msg.get.sink_length;
	info.mi_roffset = hdr->msg.get.src_offset;
	info.mi_mbits = hdr->msg.get.match_bits;

	rc = lnet_ptl_match_md(&info, msg);
	if (rc == LNET_MATCHMD_DROP) {
		CNETERR("Dropping GET from %s portal %d match %llu offset %d length %d\n",
			libcfs_id2str(info.mi_id), info.mi_portal,
			info.mi_mbits, info.mi_roffset, info.mi_rlength);
		return -ENOENT; /* -ve: OK but no match */
	}

	LASSERT(rc == LNET_MATCHMD_OK);

	lnet_build_msg_event(msg, LNET_EVENT_GET);

	reply_wmd = hdr->msg.get.return_wmd;

	lnet_prep_send(msg, LNET_MSG_REPLY, info.mi_id,
		       msg->msg_offset, msg->msg_wanted);

	msg->msg_hdr.msg.reply.dst_wmd = reply_wmd;

	if (rdma_get) {
		/* The LND completes the REPLY from her recv procedure */
		lnet_ni_recv(ni, msg->msg_private, msg, 0,
			     msg->msg_offset, msg->msg_len, msg->msg_len);
		return 0;
	}

	lnet_ni_recv(ni, msg->msg_private, NULL, 0, 0, 0, 0);
	msg->msg_receiving = 0;

	rc = lnet_send(ni->ni_nid, msg, LNET_NID_ANY);
	if (rc < 0) {
		/* didn't get as far as lnet_ni_send() */
		CERROR("%s: Unable to send REPLY for GET from %s: %d\n",
		       libcfs_nid2str(ni->ni_nid),
		       libcfs_id2str(info.mi_id), rc);

		lnet_finalize(ni, msg, rc);
	}

	return 0;
}

static int
lnet_parse_reply(lnet_ni_t *ni, lnet_msg_t *msg)
{
	void *private = msg->msg_private;
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int rlength;
	int mlength;
	int cpt;

	cpt = lnet_cpt_of_cookie(hdr->msg.reply.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.reply.dst_wmd);
	if (!md || !md->md_threshold || md->md_me) {
		CNETERR("%s: Dropping REPLY from %s for %s MD %#llx.%#llx\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			!md ? "invalid" : "inactive",
			hdr->msg.reply.dst_wmd.wh_interface_cookie,
			hdr->msg.reply.dst_wmd.wh_object_cookie);
		if (md && md->md_me)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return -ENOENT; /* -ve: OK but no match */
	}

	LASSERT(!md->md_offset);

	rlength = hdr->payload_length;
	mlength = min_t(uint, rlength, md->md_length);

	if (mlength < rlength &&
	    !(md->md_options & LNET_MD_TRUNCATE)) {
		CNETERR("%s: Dropping REPLY from %s length %d for MD %#llx would overflow (%d)\n",
			libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
			rlength, hdr->msg.reply.dst_wmd.wh_object_cookie,
			mlength);
		lnet_res_unlock(cpt);
		return -ENOENT; /* -ve: OK but no match */
	}

	CDEBUG(D_NET, "%s: Reply from %s of length %d/%d into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       mlength, rlength, hdr->msg.reply.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, mlength);

	if (mlength)
		lnet_setpayloadbuffer(msg);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	lnet_ni_recv(ni, private, msg, 0, 0, mlength, rlength);
	return 0;
}

static int
lnet_parse_ack(lnet_ni_t *ni, lnet_msg_t *msg)
{
	lnet_hdr_t *hdr = &msg->msg_hdr;
	lnet_process_id_t src = {0};
	lnet_libmd_t *md;
	int cpt;

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	/* Convert ack fields to host byte order */
	hdr->msg.ack.match_bits = le64_to_cpu(hdr->msg.ack.match_bits);
	hdr->msg.ack.mlength = le32_to_cpu(hdr->msg.ack.mlength);

	cpt = lnet_cpt_of_cookie(hdr->msg.ack.dst_wmd.wh_object_cookie);
	lnet_res_lock(cpt);

	/* NB handles only looked up by creator (no flips) */
	md = lnet_wire_handle2md(&hdr->msg.ack.dst_wmd);
	if (!md || !md->md_threshold || md->md_me) {
		/* Don't moan; this is expected */
		CDEBUG(D_NET,
		       "%s: Dropping ACK from %s to %s MD %#llx.%#llx\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
		       !md ? "invalid" : "inactive",
		       hdr->msg.ack.dst_wmd.wh_interface_cookie,
		       hdr->msg.ack.dst_wmd.wh_object_cookie);
		if (md && md->md_me)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);
		return -ENOENT; /* -ve! */
	}

	CDEBUG(D_NET, "%s: ACK from %s into md %#llx\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(src),
	       hdr->msg.ack.dst_wmd.wh_object_cookie);

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_ACK);

	lnet_ni_recv(ni, msg->msg_private, msg, 0, 0, 0, msg->msg_len);
	return 0;
}

/**
 * \retval LNET_CREDIT_OK If \a msg is forwarded
 * \retval LNET_CREDIT_WAIT If \a msg is blocked waiting for a buffer
 * \retval -ve error code
 */
int
lnet_parse_forward_locked(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc = 0;

	if (!the_lnet.ln_routing)
		return -ECANCELED;

	if (msg->msg_rxpeer->lp_rtrcredits <= 0 ||
	    lnet_msg2bufpool(msg)->rbp_credits <= 0) {
		if (!ni->ni_lnd->lnd_eager_recv) {
			msg->msg_rx_ready_delay = 1;
		} else {
			lnet_net_unlock(msg->msg_rx_cpt);
			rc = lnet_ni_eager_recv(ni, msg);
			lnet_net_lock(msg->msg_rx_cpt);
		}
	}

	if (!rc)
		rc = lnet_post_routed_recv_locked(msg, 0);
	return rc;
}

int
lnet_parse_local(lnet_ni_t *ni, lnet_msg_t *msg)
{
	int rc;

	switch (msg->msg_type) {
	case LNET_MSG_ACK:
		rc = lnet_parse_ack(ni, msg);
		break;
	case LNET_MSG_PUT:
		rc = lnet_parse_put(ni, msg);
		break;
	case LNET_MSG_GET:
		rc = lnet_parse_get(ni, msg, msg->msg_rdma_get);
		break;
	case LNET_MSG_REPLY:
		rc = lnet_parse_reply(ni, msg);
		break;
	default: /* prevent an unused label if !kernel */
		LASSERT(0);
		return -EPROTO;
	}

	LASSERT(!rc || rc == -ENOENT);
	return rc;
}

char *
lnet_msgtyp2str(int type)
{
	switch (type) {
	case LNET_MSG_ACK:
		return "ACK";
	case LNET_MSG_PUT:
		return "PUT";
	case LNET_MSG_GET:
		return "GET";
	case LNET_MSG_REPLY:
		return "REPLY";
	case LNET_MSG_HELLO:
		return "HELLO";
	default:
		return "<UNKNOWN>";
	}
}

void
lnet_print_hdr(lnet_hdr_t *hdr)
{
	lnet_process_id_t src = {0};
	lnet_process_id_t dst = {0};
	char *type_str = lnet_msgtyp2str(hdr->type);

	src.nid = hdr->src_nid;
	src.pid = hdr->src_pid;

	dst.nid = hdr->dest_nid;
	dst.pid = hdr->dest_pid;

	CWARN("P3 Header at %p of type %s\n", hdr, type_str);
	CWARN("    From %s\n", libcfs_id2str(src));
	CWARN("    To   %s\n", libcfs_id2str(dst));

	switch (hdr->type) {
	default:
		break;

	case LNET_MSG_PUT:
		CWARN("    Ptl index %d, ack md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.put.ptl_index,
		      hdr->msg.put.ack_wmd.wh_interface_cookie,
		      hdr->msg.put.ack_wmd.wh_object_cookie,
		      hdr->msg.put.match_bits);
		CWARN("    Length %d, offset %d, hdr data %#llx\n",
		      hdr->payload_length, hdr->msg.put.offset,
		      hdr->msg.put.hdr_data);
		break;

	case LNET_MSG_GET:
		CWARN("    Ptl index %d, return md %#llx.%#llx, match bits %llu\n",
		      hdr->msg.get.ptl_index,
		      hdr->msg.get.return_wmd.wh_interface_cookie,
		      hdr->msg.get.return_wmd.wh_object_cookie,
		      hdr->msg.get.match_bits);
		CWARN("    Length %d, src offset %d\n",
		      hdr->msg.get.sink_length,
		      hdr->msg.get.src_offset);
		break;

	case LNET_MSG_ACK:
		CWARN("    dst md %#llx.%#llx, manipulated length %d\n",
		      hdr->msg.ack.dst_wmd.wh_interface_cookie,
		      hdr->msg.ack.dst_wmd.wh_object_cookie,
		      hdr->msg.ack.mlength);
		break;

	case LNET_MSG_REPLY:
		CWARN("    dst md %#llx.%#llx, length %d\n",
		      hdr->msg.reply.dst_wmd.wh_interface_cookie,
		      hdr->msg.reply.dst_wmd.wh_object_cookie,
		      hdr->payload_length);
	}
}

1832int
1833lnet_parse(lnet_ni_t *ni, lnet_hdr_t *hdr, lnet_nid_t from_nid,
1834 void *private, int rdma_req)
1835{
7e7ab095
MS
1836 int rc = 0;
1837 int cpt;
1838 int for_me;
1839 struct lnet_msg *msg;
1840 lnet_pid_t dest_pid;
1841 lnet_nid_t dest_nid;
1842 lnet_nid_t src_nid;
1843 __u32 payload_length;
1844 __u32 type;
d7e09d03 1845
af66a6e2 1846 LASSERT(!in_interrupt());
d7e09d03
PT
1847
1848 type = le32_to_cpu(hdr->type);
1849 src_nid = le64_to_cpu(hdr->src_nid);
1850 dest_nid = le64_to_cpu(hdr->dest_nid);
1851 dest_pid = le32_to_cpu(hdr->dest_pid);
1852 payload_length = le32_to_cpu(hdr->payload_length);
1853
1854 for_me = (ni->ni_nid == dest_nid);
1855 cpt = lnet_cpt_of_nid(from_nid);
1856
1857 switch (type) {
1858 case LNET_MSG_ACK:
1859 case LNET_MSG_GET:
1860 if (payload_length > 0) {
1861 CERROR("%s, src %s: bad %s payload %d (0 expected)\n",
1862 libcfs_nid2str(from_nid),
1863 libcfs_nid2str(src_nid),
1864 lnet_msgtyp2str(type), payload_length);
1865 return -EPROTO;
1866 }
1867 break;
1868
1869 case LNET_MSG_PUT:
1870 case LNET_MSG_REPLY:
ae4003f0
LN
1871 if (payload_length >
1872 (__u32)(for_me ? LNET_MAX_PAYLOAD : LNET_MTU)) {
2d00bd17 1873 CERROR("%s, src %s: bad %s payload %d (%d max expected)\n",
d7e09d03
PT
1874 libcfs_nid2str(from_nid),
1875 libcfs_nid2str(src_nid),
1876 lnet_msgtyp2str(type),
1877 payload_length,
1878 for_me ? LNET_MAX_PAYLOAD : LNET_MTU);
1879 return -EPROTO;
1880 }
1881 break;
1882
1883 default:
1884 CERROR("%s, src %s: Bad message type 0x%x\n",
1885 libcfs_nid2str(from_nid),
1886 libcfs_nid2str(src_nid), type);
1887 return -EPROTO;
1888 }
1889
1890 if (the_lnet.ln_routing &&
ec0067d1 1891 ni->ni_last_alive != ktime_get_real_seconds()) {
d7e09d03 1892 /* NB: so far here is the only place to set NI status to "up */
86ef6250 1893 lnet_ni_lock(ni);
ec0067d1 1894 ni->ni_last_alive = ktime_get_real_seconds();
06ace26e 1895 if (ni->ni_status &&
d7e09d03
PT
1896 ni->ni_status->ns_status == LNET_NI_STATUS_DOWN)
1897 ni->ni_status->ns_status = LNET_NI_STATUS_UP;
1898 lnet_ni_unlock(ni);
1899 }

	/*
	 * Regard a bad destination NID as a protocol error. Senders should
	 * know what they're doing; if they don't they're misconfigured, buggy
	 * or malicious so we chop them off at the knees :)
	 */
	if (!for_me) {
		if (LNET_NIDNET(dest_nid) == LNET_NIDNET(ni->ni_nid)) {
			/* should have gone direct */
			CERROR("%s, src %s: Bad dest nid %s (should have been sent direct)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (lnet_islocalnid(dest_nid)) {
			/*
			 * dest is another local NI; sender should have used
			 * this node's NID on its own network
			 */
			CERROR("%s, src %s: Bad dest nid %s (it's my nid but on a different network)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (rdma_req && type == LNET_MSG_GET) {
			CERROR("%s, src %s: Bad optimized GET for %s (final destination must be me)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			return -EPROTO;
		}

		if (!the_lnet.ln_routing) {
			CERROR("%s, src %s: Dropping message for %s (routing not enabled)\n",
			       libcfs_nid2str(from_nid),
			       libcfs_nid2str(src_nid),
			       libcfs_nid2str(dest_nid));
			goto drop;
		}
	}

	/*
	 * Message looks OK; we're not going to return an error, so we MUST
	 * call back lnd_recv() come what may...
	 */
	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(src_nid, 0)) {		    /* shall we now? */
		CERROR("%s, src %s: Dropping %s to simulate failure\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	if (!list_empty(&the_lnet.ln_drop_rules) &&
	    lnet_drop_rule_match(hdr)) {
		CDEBUG(D_NET, "%s, src %s, dst %s: Dropping %s to simulate silent message loss\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       libcfs_nid2str(dest_nid), lnet_msgtyp2str(type));
		goto drop;
	}

	msg = lnet_msg_alloc();
	if (!msg) {
		CERROR("%s, src %s: Dropping %s (out of memory)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type));
		goto drop;
	}

	/*
	 * msg zeroed in lnet_msg_alloc();
	 * i.e. flags all clear, pointers NULL etc.
	 */
	msg->msg_type = type;
	msg->msg_private = private;
	msg->msg_receiving = 1;
	msg->msg_rdma_get = rdma_req;
	msg->msg_wanted = payload_length;
	msg->msg_len = payload_length;
	msg->msg_offset = 0;
	msg->msg_hdr = *hdr;
	/* for building message event */
	msg->msg_from = from_nid;
	if (!for_me) {
		msg->msg_target.pid = dest_pid;
		msg->msg_target.nid = dest_nid;
		msg->msg_routing = 1;
	} else {
		/* convert common msg->hdr fields to host byteorder */
		msg->msg_hdr.type = type;
		msg->msg_hdr.src_nid = src_nid;
		msg->msg_hdr.src_pid = le32_to_cpu(msg->msg_hdr.src_pid);
		msg->msg_hdr.dest_nid = dest_nid;
		msg->msg_hdr.dest_pid = dest_pid;
		msg->msg_hdr.payload_length = payload_length;
	}

	lnet_net_lock(cpt);
	rc = lnet_nid2peer_locked(&msg->msg_rxpeer, from_nid, cpt);
	if (rc) {
		lnet_net_unlock(cpt);
		CERROR("%s, src %s: Dropping %s (error %d looking up sender)\n",
		       libcfs_nid2str(from_nid), libcfs_nid2str(src_nid),
		       lnet_msgtyp2str(type), rc);
		lnet_msg_free(msg);
		goto drop;
	}

	if (lnet_isrouter(msg->msg_rxpeer)) {
		lnet_peer_set_alive(msg->msg_rxpeer);
		if (avoid_asym_router_failure &&
		    LNET_NIDNET(src_nid) != LNET_NIDNET(from_nid)) {
			/*
			 * received a remote message from a router; update
			 * the remote NI status on this router.
			 * NB: multi-hop routed messages will be ignored.
			 */
			lnet_router_ni_update_locked(msg->msg_rxpeer,
						     LNET_NIDNET(src_nid));
		}
	}

	lnet_msg_commit(msg, cpt);

	/* message delay simulation */
	if (unlikely(!list_empty(&the_lnet.ln_delay_rules) &&
		     lnet_delay_rule_match_locked(hdr, msg))) {
		lnet_net_unlock(cpt);
		return 0;
	}

	if (!for_me) {
		rc = lnet_parse_forward_locked(ni, msg);
		lnet_net_unlock(cpt);

		if (rc < 0)
			goto free_drop;

		if (rc == LNET_CREDIT_OK) {
			lnet_ni_recv(ni, msg->msg_private, msg, 0,
				     0, payload_length, payload_length);
		}
		return 0;
	}

	lnet_net_unlock(cpt);

	rc = lnet_parse_local(ni, msg);
	if (rc)
		goto free_drop;
	return 0;

 free_drop:
	LASSERT(!msg->msg_md);
	lnet_finalize(ni, msg, rc);

 drop:
	lnet_drop_message(ni, cpt, private, payload_length);
	return 0;
}
EXPORT_SYMBOL(lnet_parse);
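
/*
 * Usage sketch (editorial illustration, not from the original source): an
 * LND typically calls lnet_parse() from its receive path once a message
 * header has arrived; LNet then completes or drops delivery through the
 * lnd_recv() callback.  The 'rx' and 'conn' names below are hypothetical:
 *
 *	rc = lnet_parse(ni, &rx->rx_hdr, conn->peer_nid, rx, 0);
 *	if (rc < 0)
 *		close_conn(conn, rc);	// -EPROTO: misbehaving peer
 *
 * A zero return means LNet has taken responsibility for the message, even
 * if it chose to drop it internally.
 */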

void
lnet_drop_delayed_msg_list(struct list_head *head, char *reason)
{
	while (!list_empty(head)) {
		lnet_process_id_t id = {0};
		lnet_msg_t *msg;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(!msg->msg_md);
		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_rxpeer);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CWARN("Dropping delayed PUT from %s portal %d match %llu offset %d length %d: %s\n",
		      libcfs_id2str(id),
		      msg->msg_hdr.msg.put.ptl_index,
		      msg->msg_hdr.msg.put.match_bits,
		      msg->msg_hdr.msg.put.offset,
		      msg->msg_hdr.payload_length, reason);

		/*
		 * NB I can't drop msg's ref on msg_rxpeer until after I've
		 * called lnet_drop_message(), so I just hang onto msg as well
		 * until that's done
		 */
		lnet_drop_message(msg->msg_rxpeer->lp_ni,
				  msg->msg_rxpeer->lp_cpt,
				  msg->msg_private, msg->msg_len);
		/*
		 * NB: the message will not generate an event because it has
		 * no attached MD, but we still should give an error code so
		 * that lnet_msg_decommit() can skip counter operations and
		 * other checks.
		 */
		lnet_finalize(msg->msg_rxpeer->lp_ni, msg, -ENOENT);
	}
}

void
lnet_recv_delayed_msg_list(struct list_head *head)
{
	while (!list_empty(head)) {
		lnet_msg_t *msg;
		lnet_process_id_t id;

		msg = list_entry(head->next, lnet_msg_t, msg_list);
		list_del(&msg->msg_list);

		/*
		 * md won't disappear under me, since each msg
		 * holds a ref on it
		 */
		id.nid = msg->msg_hdr.src_nid;
		id.pid = msg->msg_hdr.src_pid;

		LASSERT(msg->msg_rx_delayed);
		LASSERT(msg->msg_md);
		LASSERT(msg->msg_rxpeer);
		LASSERT(msg->msg_hdr.type == LNET_MSG_PUT);

		CDEBUG(D_NET, "Resuming delayed PUT from %s portal %d match %llu offset %d length %d.\n",
		       libcfs_id2str(id), msg->msg_hdr.msg.put.ptl_index,
		       msg->msg_hdr.msg.put.match_bits,
		       msg->msg_hdr.msg.put.offset,
		       msg->msg_hdr.payload_length);

		lnet_recv_put(msg->msg_rxpeer->lp_ni, msg);
	}
}
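
/*
 * Editorial sketch (hypothetical, not from the original source): the two
 * helpers above consume lists of PUTs that were queued waiting for a
 * matching MD.  A caller that has just resolved the queue, e.g. after an
 * MD lands on the portal, might do:
 *
 *	LIST_HEAD(matched);
 *	LIST_HEAD(dropped);
 *
 *	... sort the blocked messages onto 'matched'/'dropped' under the
 *	    portal lock ...
 *
 *	lnet_recv_delayed_msg_list(&matched);
 *	lnet_drop_delayed_msg_list(&dropped, "incompatible MD");
 */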

/**
 * Initiate an asynchronous PUT operation.
 *
 * There are several events associated with a PUT: completion of the send on
 * the initiator node (LNET_EVENT_SEND) and, when the send completes
 * successfully, the receipt of an acknowledgment (LNET_EVENT_ACK) indicating
 * that the operation was accepted by the target. The event LNET_EVENT_PUT is
 * used at the target node to indicate the completion of incoming data
 * delivery.
 *
 * The local events will be logged in the EQ associated with the MD pointed
 * to by the \a mdh handle. Using an MD without an associated EQ results in
 * these events being discarded. In this case, the caller must have another
 * mechanism (e.g., a higher level protocol) for determining when it is safe
 * to modify the memory region associated with the MD.
 *
 * Note that LNet does not guarantee the order of LNET_EVENT_SEND and
 * LNET_EVENT_ACK, though intuitively the ACK should happen after the SEND.
 *
 * \param self Indicates the NID of a local interface through which to send
 * the PUT request. Use LNET_NID_ANY to let LNet choose one by itself.
 * \param mdh A handle for the MD that describes the memory to be sent. The MD
 * must be "free floating" (see LNetMDBind()).
 * \param ack Controls whether an acknowledgment is requested.
 * Acknowledgments are only sent when they are requested by the initiating
 * process and the target MD enables them.
 * \param target A process identifier for the target process.
 * \param portal The index in the \a target's portal table.
 * \param match_bits The match bits to use for MD selection at the target
 * process.
 * \param offset The offset into the target MD (only used when the target
 * MD has the LNET_MD_MANAGE_REMOTE option set).
 * \param hdr_data 64 bits of user data that can be included in the message
 * header. This data is written to an event queue entry at the target if an
 * EQ is present on the matching MD.
 *
 * \retval 0 Success, and only in this case events will be generated
 * and logged to the EQ (if it exists).
 * \retval -EIO Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 *
 * \see lnet_event_t::hdr_data and lnet_event_kind_t.
 */
int
LNetPut(lnet_nid_t self, lnet_handle_md_t mdh, lnet_ack_req_t ack,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset,
	__u64 hdr_data)
{
	struct lnet_msg *msg;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(target.nid, 1)) {		    /* shall we now? */
		CERROR("Dropping PUT to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (!msg) {
		CERROR("Dropping PUT to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}
	msg->msg_vmflush = !!memory_pressure_get();

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (!md || !md->md_threshold || md->md_me) {
		CERROR("Dropping PUT (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       !md ? -1 : md->md_threshold);
		if (md && md->md_me)
			CERROR("Source MD also attached to portal %d\n",
			       md->md_me->me_portal);
		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetPut -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_PUT, target, 0, md->md_length);

	msg->msg_hdr.msg.put.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.put.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.put.offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.put.hdr_data = hdr_data;

	/* NB handles only looked up by creator (no flips) */
	if (ack == LNET_ACK_REQ) {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			the_lnet.ln_interface_cookie;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			md->md_lh.lh_cookie;
	} else {
		msg->msg_hdr.msg.put.ack_wmd.wh_interface_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
		msg->msg_hdr.msg.put.ack_wmd.wh_object_cookie =
			LNET_WIRE_HANDLE_COOKIE_NONE;
	}

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc) {
		CNETERR("Error sending PUT to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetPut);
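
/*
 * Usage sketch (editorial illustration; names and MD setup are assumed,
 * see LNetMDBind() for the authoritative interface): send 'len' bytes from
 * 'buf' with an ACK requested.  The threshold is 2 so the MD survives both
 * the SEND and the ACK events:
 *
 *	lnet_md_t umd = {
 *		.start     = buf,
 *		.length    = len,
 *		.threshold = 2,
 *		.eq_handle = eqh,	// EQ that will log SEND/ACK events
 *	};
 *	lnet_handle_md_t mdh;
 *
 *	rc = LNetMDBind(umd, LNET_UNLINK, &mdh);
 *	if (!rc)
 *		rc = LNetPut(LNET_NID_ANY, mdh, LNET_ACK_REQ, target,
 *			     portal, match_bits, 0, 0);
 */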

lnet_msg_t *
lnet_create_reply_msg(lnet_ni_t *ni, lnet_msg_t *getmsg)
{
	/*
	 * The LND can DMA direct to the GET md (i.e. no REPLY msg).  This
	 * returns a msg for the LND to pass to lnet_finalize() when the sink
	 * data has been received.
	 *
	 * CAVEAT EMPTOR: 'getmsg' is the original GET, which is freed when
	 * lnet_finalize() is called on it, so the LND must call this first
	 */
	struct lnet_msg *msg = lnet_msg_alloc();
	struct lnet_libmd *getmd = getmsg->msg_md;
	lnet_process_id_t peer_id = getmsg->msg_target;
	int cpt;

	LASSERT(!getmsg->msg_target_is_router);
	LASSERT(!getmsg->msg_routing);

	if (!msg) {
		CERROR("%s: Dropping REPLY from %s: can't allocate msg\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id));
		goto drop;
	}

	cpt = lnet_cpt_of_cookie(getmd->md_lh.lh_cookie);
	lnet_res_lock(cpt);

	LASSERT(getmd->md_refcount > 0);

	if (!getmd->md_threshold) {
		CERROR("%s: Dropping REPLY from %s for inactive MD %p\n",
		       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id),
		       getmd);
		lnet_res_unlock(cpt);
		goto drop;
	}

	LASSERT(!getmd->md_offset);

	CDEBUG(D_NET, "%s: Reply from %s md %p\n",
	       libcfs_nid2str(ni->ni_nid), libcfs_id2str(peer_id), getmd);

	/* setup information for lnet_build_msg_event */
	msg->msg_from = peer_id.nid;
	msg->msg_type = LNET_MSG_GET; /* flag this msg as an "optimized" GET */
	msg->msg_hdr.src_nid = peer_id.nid;
	msg->msg_hdr.payload_length = getmd->md_length;
	msg->msg_receiving = 1; /* required by lnet_msg_attach_md */

	lnet_msg_attach_md(msg, getmd, getmd->md_offset, getmd->md_length);
	lnet_res_unlock(cpt);

	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	lnet_msg_commit(msg, cpt);
	lnet_net_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_REPLY);

	return msg;

 drop:
	cpt = lnet_cpt_of_nid(peer_id.nid);

	lnet_net_lock(cpt);
	the_lnet.ln_counters[cpt]->drop_count++;
	the_lnet.ln_counters[cpt]->drop_length += getmd->md_length;
	lnet_net_unlock(cpt);

	if (msg)
		lnet_msg_free(msg);

	return NULL;
}
EXPORT_SYMBOL(lnet_create_reply_msg);
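
/*
 * Editorial sketch (hypothetical LND code): for an "optimized" GET, where
 * the peer RDMAs the sink data directly, the LND must create the REPLY
 * message before it finalizes the original GET, exactly as the CAVEAT
 * EMPTOR note above warns:
 *
 *	reply = lnet_create_reply_msg(ni, getmsg);
 *	... post the RDMA for the sink data, keeping 'reply' for the
 *	    completion handler ...
 *	lnet_finalize(ni, getmsg, rc);	// 'getmsg' may now be freed
 */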

void
lnet_set_reply_msg_len(lnet_ni_t *ni, lnet_msg_t *reply, unsigned int len)
{
	/*
	 * Set the REPLY length, now that the RDMA that elides the REPLY
	 * message has completed and I know it.
	 */
	LASSERT(reply);
	LASSERT(reply->msg_type == LNET_MSG_GET);
	LASSERT(reply->msg_ev.type == LNET_EVENT_REPLY);

	/*
	 * NB I trusted my peer to RDMA.  If she tells me she's written beyond
	 * the end of my buffer, I might as well be dead.
	 */
	LASSERT(len <= reply->msg_ev.mlength);

	reply->msg_ev.mlength = len;
}
EXPORT_SYMBOL(lnet_set_reply_msg_len);
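
/*
 * Editorial sketch (hypothetical LND completion handler), continuing the
 * optimized-GET example above: once the RDMA has completed and the number
 * of bytes actually written ('nob') is known, the LND records it and then
 * finalizes the REPLY message it obtained from lnet_create_reply_msg():
 *
 *	lnet_set_reply_msg_len(ni, reply, nob);
 *	lnet_finalize(ni, reply, status);
 */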

/**
 * Initiate an asynchronous GET operation.
 *
 * On the initiator node, an LNET_EVENT_SEND is logged when the GET request
 * is sent, and an LNET_EVENT_REPLY is logged when the data returned from
 * the target node in the REPLY has been written to the local MD.
 *
 * On the target node, an LNET_EVENT_GET is logged when the GET request
 * arrives and is accepted into an MD.
 *
 * \param self,target,portal,match_bits,offset See the discussion in LNetPut().
 * \param mdh A handle for the MD that describes the memory into which the
 * requested data will be received. The MD must be "free floating"
 * (see LNetMDBind()).
 *
 * \retval 0 Success, and only in this case events will be generated
 * and logged to the EQ (if it exists) of the MD.
 * \retval -EIO Simulated failure.
 * \retval -ENOMEM Memory allocation failure.
 * \retval -ENOENT Invalid MD object.
 */
int
LNetGet(lnet_nid_t self, lnet_handle_md_t mdh,
	lnet_process_id_t target, unsigned int portal,
	__u64 match_bits, unsigned int offset)
{
	struct lnet_msg *msg;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_refcount > 0);

	if (!list_empty(&the_lnet.ln_test_peers) && /* normally we don't */
	    fail_peer(target.nid, 1)) {		    /* shall we now? */
		CERROR("Dropping GET to %s: simulated failure\n",
		       libcfs_id2str(target));
		return -EIO;
	}

	msg = lnet_msg_alloc();
	if (!msg) {
		CERROR("Dropping GET to %s: ENOMEM on lnet_msg_t\n",
		       libcfs_id2str(target));
		return -ENOMEM;
	}

	cpt = lnet_cpt_of_cookie(mdh.cookie);
	lnet_res_lock(cpt);

	md = lnet_handle2md(&mdh);
	if (!md || !md->md_threshold || md->md_me) {
		CERROR("Dropping GET (%llu:%d:%s): MD (%d) invalid\n",
		       match_bits, portal, libcfs_id2str(target),
		       !md ? -1 : md->md_threshold);
		if (md && md->md_me)
			CERROR("REPLY MD also attached to portal %d\n",
			       md->md_me->me_portal);

		lnet_res_unlock(cpt);

		lnet_msg_free(msg);
		return -ENOENT;
	}

	CDEBUG(D_NET, "LNetGet -> %s\n", libcfs_id2str(target));

	lnet_msg_attach_md(msg, md, 0, 0);

	lnet_prep_send(msg, LNET_MSG_GET, target, 0, 0);

	msg->msg_hdr.msg.get.match_bits = cpu_to_le64(match_bits);
	msg->msg_hdr.msg.get.ptl_index = cpu_to_le32(portal);
	msg->msg_hdr.msg.get.src_offset = cpu_to_le32(offset);
	msg->msg_hdr.msg.get.sink_length = cpu_to_le32(md->md_length);

	/* NB handles only looked up by creator (no flips) */
	msg->msg_hdr.msg.get.return_wmd.wh_interface_cookie =
		the_lnet.ln_interface_cookie;
	msg->msg_hdr.msg.get.return_wmd.wh_object_cookie =
		md->md_lh.lh_cookie;

	lnet_res_unlock(cpt);

	lnet_build_msg_event(msg, LNET_EVENT_SEND);

	rc = lnet_send(self, msg, LNET_NID_ANY);
	if (rc < 0) {
		CNETERR("Error sending GET to %s: %d\n",
			libcfs_id2str(target), rc);
		lnet_finalize(NULL, msg, rc);
	}

	/* completion will be signalled by an event */
	return 0;
}
EXPORT_SYMBOL(LNetGet);
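
/*
 * Usage sketch (editorial illustration; MD setup assumed as in the
 * LNetPut() example above): fetch the peer's data into 'buf'.  The
 * threshold is 2 so the MD survives both the SEND and REPLY events:
 *
 *	lnet_md_t umd = {
 *		.start     = buf,
 *		.length    = len,
 *		.threshold = 2,
 *		.eq_handle = eqh,
 *	};
 *	lnet_handle_md_t mdh;
 *
 *	rc = LNetMDBind(umd, LNET_UNLINK, &mdh);
 *	if (!rc)
 *		rc = LNetGet(LNET_NID_ANY, mdh, target, portal,
 *			     match_bits, 0);
 */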

/**
 * Calculate the distance to the node at \a dstnid.
 *
 * \param dstnid Target NID.
 * \param srcnidp If not NULL, NID of the local interface to reach \a dstnid
 * is saved here.
 * \param orderp If not NULL, order of the route to reach \a dstnid is saved
 * here.
 *
 * \retval 0 If \a dstnid belongs to a local interface, and the reserved
 * option local_nid_dist_zero is set, which is the default.
 * \retval positives Distance to the target NID, i.e. the number of hops
 * plus one.
 * \retval -EHOSTUNREACH If \a dstnid is not reachable.
 */
int
LNetDist(lnet_nid_t dstnid, lnet_nid_t *srcnidp, __u32 *orderp)
{
	struct list_head *e;
	struct lnet_ni *ni;
	lnet_remotenet_t *rnet;
	__u32 dstnet = LNET_NIDNET(dstnid);
	int hops;
	int cpt;
	__u32 order = 2;
	struct list_head *rn_list;

	/*
	 * if !local_nid_dist_zero, I don't return a distance of 0 ever
	 * (when lustre sees a distance of 0, it substitutes 0@lo), so I
	 * keep order 0 free for 0@lo and order 1 free for a local NID
	 * match
	 */
	LASSERT(the_lnet.ln_refcount > 0);

	cpt = lnet_net_lock_current();

	list_for_each(e, &the_lnet.ln_nis) {
		ni = list_entry(e, lnet_ni_t, ni_list);

		if (ni->ni_nid == dstnid) {
			if (srcnidp)
				*srcnidp = dstnid;
			if (orderp) {
				if (LNET_NETTYP(LNET_NIDNET(dstnid)) == LOLND)
					*orderp = 0;
				else
					*orderp = 1;
			}
			lnet_net_unlock(cpt);

			return local_nid_dist_zero ? 0 : 1;
		}

		if (LNET_NIDNET(ni->ni_nid) == dstnet) {
			if (srcnidp)
				*srcnidp = ni->ni_nid;
			if (orderp)
				*orderp = order;
			lnet_net_unlock(cpt);
			return 1;
		}

		order++;
	}

	rn_list = lnet_net2rnethash(dstnet);
	list_for_each(e, rn_list) {
		rnet = list_entry(e, lnet_remotenet_t, lrn_list);

		if (rnet->lrn_net == dstnet) {
			lnet_route_t *route;
			lnet_route_t *shortest = NULL;
			__u32 shortest_hops = LNET_UNDEFINED_HOPS;
			__u32 route_hops;

			LASSERT(!list_empty(&rnet->lrn_routes));

			list_for_each_entry(route, &rnet->lrn_routes,
					    lr_list) {
				route_hops = route->lr_hops;
				if (route_hops == LNET_UNDEFINED_HOPS)
					route_hops = 1;
				if (!shortest ||
				    route_hops < shortest_hops) {
					shortest = route;
					shortest_hops = route_hops;
				}
			}

			LASSERT(shortest);
			hops = shortest_hops;
			if (srcnidp)
				*srcnidp = shortest->lr_gateway->lp_ni->ni_nid;
			if (orderp)
				*orderp = order;
			lnet_net_unlock(cpt);
			return hops + 1;
		}
		order++;
	}

	lnet_net_unlock(cpt);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(LNetDist);
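
/*
 * Usage sketch (editorial illustration): interpreting LNetDist() results
 * per the return-value contract documented above:
 *
 *	lnet_nid_t src;
 *	__u32 order;
 *	int dist = LNetDist(dstnid, &src, &order);
 *
 *	dist < 0  : dstnid unreachable (-EHOSTUNREACH)
 *	dist == 0 : dstnid is a local interface (local_nid_dist_zero set)
 *	dist > 0  : reachable via 'src', with dist - 1 intermediate hops
 */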