/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015 Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_RPC

#include "../../include/linux/libcfs/libcfs.h"
#include <linux/kernel.h>

#include "../include/obd_class.h"
#include "../include/lustre_net.h"
#include "../include/lustre_sec.h"
#include "ptlrpc_internal.h"
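
/* The single event queue shared by all ptlrpc callbacks; allocated in
 * ptlrpc_ni_init() and freed in ptlrpc_ni_fini().
 */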
lnet_handle_eq_t ptlrpc_eq_h;

/*
 * Client's outgoing request callback
 */
void request_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;
	bool wakeup = false;

	LASSERT(ev->type == LNET_EVENT_SEND || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	sptlrpc_request_out_callback(req);

	spin_lock(&req->rq_lock);
	req->rq_real_sent = ktime_get_real_seconds();
	req->rq_req_unlinked = 1;
	/* reply_in_callback happened before request_out_callback? */
	if (req->rq_reply_unlinked)
		wakeup = true;

	if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
		/* Failed send: make it seem like the reply timed out, just
		 * like failing sends in client.c currently do...
		 */
		req->rq_net_err = 1;
		wakeup = true;
	}

	if (wakeup)
		ptlrpc_client_wake_req(req);

	spin_unlock(&req->rq_lock);

	ptlrpc_req_finished(req);
}

/*
 * Client's incoming reply callback
 */
void reply_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request *req = cbid->cbid_arg;

	DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);

	LASSERT(ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->md.start == req->rq_repbuf);
	LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len);
	/* We've set LNET_MD_MANAGE_REMOTE for all outgoing requests
	 * for adaptive timeouts' early reply.
	 */
	LASSERT((ev->md.options & LNET_MD_MANAGE_REMOTE) != 0);
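
	/* NB: the server appears to PUT any early reply at offset 0 of
	 * this buffer, while the real reply under adaptive timeouts lands
	 * at a nonzero offset; the ev->offset checks below rely on this.
	 */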

	spin_lock(&req->rq_lock);

	req->rq_receiving_reply = 0;
	req->rq_early = 0;
	if (ev->unlinked)
		req->rq_reply_unlinked = 1;

	if (ev->status)
		goto out_wake;

	if (ev->type == LNET_EVENT_UNLINK) {
		LASSERT(ev->unlinked);
		DEBUG_REQ(D_NET, req, "unlink");
		goto out_wake;
	}

	if (ev->mlength < ev->rlength) {
		CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req,
		       req->rq_replen, ev->rlength, ev->offset);
		req->rq_reply_truncated = 1;
		req->rq_replied = 1;
		req->rq_status = -EOVERFLOW;
		req->rq_nob_received = ev->rlength + ev->offset;
		goto out_wake;
	}

	if ((ev->offset == 0) &&
	    ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) {
		/* Early reply */
		DEBUG_REQ(D_ADAPTTO, req,
			  "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d",
			  ev->mlength, ev->offset,
			  req->rq_replen, req->rq_replied, ev->unlinked);

		req->rq_early_count++;	/* number received, client side */

		/* already got the real reply or buffers are already unlinked */
		if (req->rq_replied || req->rq_reply_unlinked == 1)
			goto out_wake;

		req->rq_early = 1;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* And we're still receiving */
		req->rq_receiving_reply = 1;
	} else {
		/* Real reply */
		req->rq_rep_swab_mask = 0;
		req->rq_replied = 1;
		/* Got reply, no resend required */
		req->rq_resend = 0;
		req->rq_reply_off = ev->offset;
		req->rq_nob_received = ev->mlength;
		/* LNetMDUnlink can't be called under the LNET_LOCK,
		 * so we must unlink in ptlrpc_unregister_reply
		 */
		DEBUG_REQ(D_INFO, req,
			  "reply in flags=%x mlen=%u offset=%d replen=%d",
			  lustre_msg_get_flags(req->rq_reqmsg),
			  ev->mlength, ev->offset, req->rq_replen);
	}

	req->rq_import->imp_last_reply_time = ktime_get_real_seconds();

out_wake:
	/* NB don't unlock till after wakeup; req can disappear under us
	 * since we don't have our own ref
	 */
	ptlrpc_client_wake_req(req);
	spin_unlock(&req->rq_lock);
}

/*
 * Client's bulk has been written/read
 */
void client_bulk_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
	struct ptlrpc_request *req;

	LASSERT((desc->bd_type == BULK_PUT_SINK &&
		 ev->type == LNET_EVENT_PUT) ||
		(desc->bd_type == BULK_GET_SOURCE &&
		 ev->type == LNET_EVENT_GET) ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT(ev->unlinked);

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB, CFS_FAIL_ONCE))
		ev->status = -EIO;

	if (CFS_FAIL_CHECK_ORSET(OBD_FAIL_PTLRPC_CLIENT_BULK_CB2,
				 CFS_FAIL_ONCE))
		ev->status = -EIO;

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, desc %p\n",
	       ev->type, ev->status, desc);

	spin_lock(&desc->bd_lock);
	req = desc->bd_req;
	LASSERT(desc->bd_md_count > 0);
	desc->bd_md_count--;

	if (ev->type != LNET_EVENT_UNLINK && ev->status == 0) {
		desc->bd_nob_transferred += ev->mlength;
		desc->bd_sender = ev->sender;
	} else {
		/* start reconnect and resend if network error hit */
		spin_lock(&req->rq_lock);
		req->rq_net_err = 1;
		spin_unlock(&req->rq_lock);

		desc->bd_failure = 1;
	}

	/* NB don't unlock till after wakeup; desc can disappear under us
	 * since we don't have our own ref
	 */
	if (desc->bd_md_count == 0)
		ptlrpc_client_wake_req(desc->bd_req);

	spin_unlock(&desc->bd_lock);
}

/*
 * We will have percpt request history lists for the ptlrpc service in
 * upcoming patches, because we don't want to be serialized by the
 * current per-service history operations. So we require that a history
 * ID can (somehow) reflect arrival order without grabbing a global
 * lock, and that records can be sorted in user space.
 *
 * This is how we generate the history ID for a ptlrpc_request:
 * ----------------------------------------------------
 * |  32 bits  |  16 bits  | (16 - X) bits |  X bits  |
 * ----------------------------------------------------
 * |  seconds  | usec / 16 |   sequence    |  CPT id  |
 * ----------------------------------------------------
 *
 * It might not be precise, but it should be good enough.
 */
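
/*
 * Worked example with illustrative numbers (not from the original
 * sources): on a service with srv_cpt_bits = 2, a request arriving at
 * tv_sec = 1000, tv_nsec = 123456789 on CPT 3 is assigned
 *
 *	usec bucket = 123456789 / NSEC_PER_USEC / 16 = 7716
 *	seq	    = (1000ULL << 32) | (7716 << 16) | 3
 *
 * A second request landing in the same 16-usec bucket instead bumps the
 * sequence field: seq += 1 << srv_cpt_bits, i.e. seq + 4 here.
 */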

#define REQS_CPT_BITS(svcpt)	((svcpt)->scp_service->srv_cpt_bits)

#define REQS_SEC_SHIFT		32
#define REQS_USEC_SHIFT		16
#define REQS_SEQ_SHIFT(svcpt)	REQS_CPT_BITS(svcpt)

static void ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt,
				   struct ptlrpc_request *req)
{
	__u64 sec = req->rq_arrival_time.tv_sec;
	__u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
	__u64 new_seq;

	/* Set the sequence ID for the request and add it to the history
	 * list; must be called with svcpt::scp_lock held.
	 */

	new_seq = (sec << REQS_SEC_SHIFT) |
		  (usec << REQS_USEC_SHIFT) |
		  (svcpt->scp_cpt < 0 ? 0 : svcpt->scp_cpt);

	if (new_seq > svcpt->scp_hist_seq) {
		/* This handles the initial case of scp_hist_seq == 0 or
		 * we just jumped into a new time window
		 */
		svcpt->scp_hist_seq = new_seq;
	} else {
		LASSERT(REQS_SEQ_SHIFT(svcpt) < REQS_USEC_SHIFT);
		/* NB: increase the sequence number in the current usec
		 * bucket; however, it's possible that we used up all bits
		 * for the sequence and jumped into the next usec bucket
		 * (future time), in which case we hope there will be fewer
		 * RPCs per bucket at some point, and the sequence will
		 * catch up again.
		 */
		svcpt->scp_hist_seq += (1U << REQS_SEQ_SHIFT(svcpt));
		new_seq = svcpt->scp_hist_seq;
	}

	req->rq_history_seq = new_seq;

	list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs);
}

/*
 * Server's incoming request callback
 */
void request_in_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_request_buffer_desc *rqbd = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service *service = svcpt->scp_service;
	struct ptlrpc_request *req;

	LASSERT(ev->type == LNET_EVENT_PUT ||
		ev->type == LNET_EVENT_UNLINK);
	LASSERT((char *)ev->md.start >= rqbd->rqbd_buffer);
	LASSERT((char *)ev->md.start + ev->offset + ev->mlength <=
		rqbd->rqbd_buffer + service->srv_buf_size);

	CDEBUG((ev->status == 0) ? D_NET : D_ERROR,
	       "event type %d, status %d, service %s\n",
	       ev->type, ev->status, service->srv_name);

	if (ev->unlinked) {
		/* If this is the last request message to fit in the
		 * request buffer we can use the request object embedded in
		 * rqbd.  Note that if we failed to allocate a request,
		 * we'd have to re-post the rqbd, which we can't do in this
		 * context.
		 */
		req = &rqbd->rqbd_req;
		memset(req, 0, sizeof(*req));
	} else {
		LASSERT(ev->type == LNET_EVENT_PUT);
		if (ev->status != 0) {
			/* We moaned above already... */
			return;
		}
		req = ptlrpc_request_cache_alloc(GFP_ATOMIC);
		if (!req) {
			CERROR("Can't allocate incoming request descriptor: Dropping %s RPC from %s\n",
			       service->srv_name,
			       libcfs_id2str(ev->initiator));
			return;
		}
	}

	ptlrpc_srv_req_init(req);
	/* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL,
	 * flags are reset and scalars are zero.  We only set the message
	 * size to non-zero if this was a successful receive.
	 */
	req->rq_xid = ev->match_bits;
	req->rq_reqbuf = ev->md.start + ev->offset;
	if (ev->type == LNET_EVENT_PUT && ev->status == 0)
		req->rq_reqdata_len = ev->mlength;
	ktime_get_real_ts64(&req->rq_arrival_time);
	req->rq_peer = ev->initiator;
	req->rq_self = ev->target.nid;
	req->rq_rqbd = rqbd;
	req->rq_phase = RQ_PHASE_NEW;
	if (ev->type == LNET_EVENT_PUT)
		CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n",
		       req, req->rq_xid, ev->mlength);

	CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer));

	spin_lock(&svcpt->scp_lock);

	ptlrpc_req_add_history(svcpt, req);

	if (ev->unlinked) {
		svcpt->scp_nrqbds_posted--;
		CDEBUG(D_INFO, "Buffer complete: %d buffers still posted\n",
		       svcpt->scp_nrqbds_posted);

		/* Normally, don't complain about 0 buffers posted; LNET won't
		 * drop incoming reqs since we set the portal lazy
		 */
		if (test_req_buffer_pressure &&
		    ev->type != LNET_EVENT_UNLINK &&
		    svcpt->scp_nrqbds_posted == 0)
			CWARN("All %s request buffers busy\n",
			      service->srv_name);

		/* req takes over the network's ref on rqbd */
	} else {
		/* req takes a ref on rqbd */
		rqbd->rqbd_refcount++;
	}

	list_add_tail(&req->rq_list, &svcpt->scp_req_incoming);
	svcpt->scp_nreqs_incoming++;

	/* NB everything can disappear under us once the request
	 * has been queued and we unlock, so do the wake now...
	 */
	wake_up(&svcpt->scp_waitq);

	spin_unlock(&svcpt->scp_lock);
}

/*
 * Server's outgoing reply callback
 */
void reply_out_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	struct ptlrpc_reply_state *rs = cbid->cbid_arg;
	struct ptlrpc_service_part *svcpt = rs->rs_svcpt;

	LASSERT(ev->type == LNET_EVENT_SEND ||
		ev->type == LNET_EVENT_ACK ||
		ev->type == LNET_EVENT_UNLINK);

	if (!rs->rs_difficult) {
		/* 'Easy' replies have no further processing, so I drop the
		 * net's ref on 'rs'
		 */
		LASSERT(ev->unlinked);
		ptlrpc_rs_decref(rs);
		return;
	}

	LASSERT(rs->rs_on_net);

	if (ev->unlinked) {
		/* Last network callback. The net's ref on 'rs' stays put
		 * until ptlrpc_handle_rs() is done with it
		 */
		spin_lock(&svcpt->scp_rep_lock);
		spin_lock(&rs->rs_lock);

		rs->rs_on_net = 0;
		if (!rs->rs_no_ack ||
		    rs->rs_transno <=
		    rs->rs_export->exp_obd->obd_last_committed)
			ptlrpc_schedule_difficult_reply(rs);

		spin_unlock(&rs->rs_lock);
		spin_unlock(&svcpt->scp_rep_lock);
	}
}
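
/*
 * Dispatcher registered with LNet: each event's MD user pointer is a
 * struct ptlrpc_cb_id naming the specific callback to invoke.
 */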
static void ptlrpc_master_callback(lnet_event_t *ev)
{
	struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
	void (*callback)(lnet_event_t *ev) = cbid->cbid_fn;

	/* Honestly, it's best to find out early. */
	LASSERT(cbid->cbid_arg != LP_POISON);
	LASSERT(callback == request_out_callback ||
		callback == reply_in_callback ||
		callback == client_bulk_callback ||
		callback == request_in_callback ||
		callback == reply_out_callback);

	callback(ev);
}
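
/*
 * Resolve a Lustre UUID to the closest LNet peer, preferring the
 * loopback LND for local NIDs; the local NID to send from is
 * returned in *self.
 */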
int ptlrpc_uuid_to_peer(struct obd_uuid *uuid,
			lnet_process_id_t *peer, lnet_nid_t *self)
{
	int best_dist = 0;
	__u32 best_order = 0;
	int count = 0;
	int rc = -ENOENT;
	int dist;
	__u32 order;
	lnet_nid_t dst_nid;
	lnet_nid_t src_nid;

	peer->pid = LNET_PID_LUSTRE;

	/* Choose the matching UUID that's closest */
	while (lustre_uuid_to_peer(uuid->uuid, &dst_nid, count++) == 0) {
		dist = LNetDist(dst_nid, &src_nid, &order);
		if (dist < 0)
			continue;

		if (dist == 0) {		/* local! use loopback LND */
			peer->nid = *self = LNET_MKNID(LNET_MKNET(LOLND, 0), 0);
			rc = 0;
			break;
		}

		if (rc < 0 ||
		    dist < best_dist ||
		    (dist == best_dist && order < best_order)) {
			best_dist = dist;
			best_order = order;

			peer->nid = dst_nid;
			*self = src_nid;
			rc = 0;
		}
	}

	CDEBUG(D_NET, "%s->%s\n", uuid->uuid, libcfs_id2str(*peer));
	return rc;
}

static void ptlrpc_ni_fini(void)
{
	wait_queue_head_t waitq;
	struct l_wait_info lwi;
	int rc;
	int retries;

	/* Wait for the event queue to become idle since there may still be
	 * messages in flight with pending events (i.e. the fire-and-forget
	 * messages == client requests and "non-difficult" server
	 * replies)
	 */

	for (retries = 0;; retries++) {
		rc = LNetEQFree(ptlrpc_eq_h);
		switch (rc) {
		default:
			LBUG();

		case 0:
			LNetNIFini();
			return;

		case -EBUSY:
			if (retries != 0)
				CWARN("Event queue still busy\n");

			/* Wait for a bit */
			init_waitqueue_head(&waitq);
			lwi = LWI_TIMEOUT(cfs_time_seconds(2), NULL, NULL);
			l_wait_event(waitq, 0, &lwi);
			break;
		}
	}
	/* notreached */
}

static lnet_pid_t ptl_get_pid(void)
{
	lnet_pid_t pid;

	pid = LNET_PID_LUSTRE;
	return pid;
}

static int ptlrpc_ni_init(void)
{
	int rc;
	lnet_pid_t pid;

	pid = ptl_get_pid();
	CDEBUG(D_NET, "My pid is: %x\n", pid);

	/* We're not passing any limits yet... */
	rc = LNetNIInit(pid);
	if (rc < 0) {
		CDEBUG(D_NET, "Can't init network interface: %d\n", rc);
		return -ENOENT;
	}

	/* CAVEAT EMPTOR: how we process portals events is _radically_
	 * different depending on...
	 */
	/* kernel LNet calls our master callback when there are new events,
	 * because we are guaranteed to get every event via callback,
	 * so we just set the EQ size to 0 to avoid the overhead of
	 * serializing enqueue/dequeue operations in LNet.
	 */
	rc = LNetEQAlloc(0, ptlrpc_master_callback, &ptlrpc_eq_h);
	if (rc == 0)
		return 0;

	CERROR("Failed to allocate event queue: %d\n", rc);
	LNetNIFini();

	return -ENOMEM;
}

int ptlrpc_init_portals(void)
{
	int rc = ptlrpc_ni_init();

	if (rc != 0) {
		CERROR("network initialisation failed\n");
		return rc;
	}
	rc = ptlrpcd_addref();
	if (rc == 0)
		return 0;

	CERROR("rpcd initialisation failed\n");
	ptlrpc_ni_fini();
	return rc;
}

void ptlrpc_exit_portals(void)
{
	ptlrpcd_decref();
	ptlrpc_ni_fini();
}