/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012 - 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Seagate, Inc.
 */

#ifndef __LNET_TYPES_H__
#define __LNET_TYPES_H__

#include <linux/types.h>

/** \addtogroup lnet
 * @{
 */

#define LNET_VERSION		"0.6.0"

/** \addtogroup lnet_addr
 * @{
 */

/** Portal reserved for LNet's own use.
 * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
 */
#define LNET_RESERVED_PORTAL	0

/**
 * Address of an end-point in an LNet network.
 *
 * A node can have multiple end-points and hence multiple addresses.
 * An LNet network can be a simple network (e.g. tcp0) or a network of
 * LNet networks connected by LNet routers. Therefore an end-point address
 * has two parts: network ID, and address within a network.
 *
 * \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID.
 */
typedef __u64 lnet_nid_t;
/**
 * ID of a process in a node. Shortened as PID to distinguish from
 * lnet_process_id_t, the global process ID.
 */
typedef __u32 lnet_pid_t;

/** wildcard NID that matches any end-point address */
#define LNET_NID_ANY		((lnet_nid_t) -1)
/** wildcard PID that matches any lnet_pid_t */
#define LNET_PID_ANY		((lnet_pid_t) -1)

#define LNET_PID_RESERVED	0xf0000000	/* reserved bits in PID */
#define LNET_PID_USERFLAG	0x80000000	/* set in userspace peers */
#define LNET_PID_LUSTRE		12345

#define LNET_TIME_FOREVER	(-1)

/* how an LNET NID encodes net:address */
/** extract the address part of an lnet_nid_t */

static inline __u32 LNET_NIDADDR(lnet_nid_t nid)
{
	return nid & 0xffffffff;
}

static inline __u32 LNET_NIDNET(lnet_nid_t nid)
{
	return (nid >> 32) & 0xffffffff;
}

static inline lnet_nid_t LNET_MKNID(__u32 net, __u32 addr)
{
	return (((__u64)net) << 32) | addr;
}

static inline __u32 LNET_NETNUM(__u32 net)
{
	return net & 0xffff;
}

static inline __u32 LNET_NETTYP(__u32 net)
{
	return (net >> 16) & 0xffff;
}

static inline __u32 LNET_MKNET(__u32 type, __u32 num)
{
	return (type << 16) | num;
}

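/**
 * Usage sketch (illustrative only): composing and decomposing an
 * end-point address with the helpers above. The network type (1) and
 * network number (0) below are placeholder values, not constants
 * defined in this file.
 *
 * \code
 * __u32 net = LNET_MKNET(1, 0);
 * lnet_nid_t nid = LNET_MKNID(net, 0x0a000001);
 *
 * __u32 addr = LNET_NIDADDR(nid);             // 0x0a000001
 * __u32 typ  = LNET_NETTYP(LNET_NIDNET(nid)); // 1
 * __u32 num  = LNET_NETNUM(LNET_NIDNET(nid)); // 0
 * \endcode
 */
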
#define WIRE_ATTR	__packed

/* Packed version of lnet_process_id_t to transfer via network */
typedef struct {
	/* node id / process id */
	lnet_nid_t nid;
	lnet_pid_t pid;
} WIRE_ATTR lnet_process_id_packed_t;

/*
 * The wire handle's interface cookie only matches one network interface in
 * one epoch (i.e. new cookie when the interface restarts or the node
 * reboots). The object cookie only matches one object on that interface
 * during that object's lifetime (i.e. no cookie re-use).
 */
typedef struct {
	__u64 wh_interface_cookie;
	__u64 wh_object_cookie;
} WIRE_ATTR lnet_handle_wire_t;

typedef enum {
	LNET_MSG_ACK = 0,
	LNET_MSG_PUT,
	LNET_MSG_GET,
	LNET_MSG_REPLY,
	LNET_MSG_HELLO,
} lnet_msg_type_t;

/*
 * The variant fields of the portals message header are aligned on an 8
 * byte boundary in the message header. Note that all types used in these
 * wire structs MUST be fixed size and the smaller types are placed at the
 * end.
 */
typedef struct lnet_ack {
	lnet_handle_wire_t dst_wmd;
	__u64 match_bits;
	__u32 mlength;
} WIRE_ATTR lnet_ack_t;

typedef struct lnet_put {
	lnet_handle_wire_t ack_wmd;
	__u64 match_bits;
	__u64 hdr_data;
	__u32 ptl_index;
	__u32 offset;
} WIRE_ATTR lnet_put_t;

typedef struct lnet_get {
	lnet_handle_wire_t return_wmd;
	__u64 match_bits;
	__u32 ptl_index;
	__u32 src_offset;
	__u32 sink_length;
} WIRE_ATTR lnet_get_t;

typedef struct lnet_reply {
	lnet_handle_wire_t dst_wmd;
} WIRE_ATTR lnet_reply_t;

typedef struct lnet_hello {
	__u64 incarnation;
	__u32 type;
} WIRE_ATTR lnet_hello_t;

typedef struct {
	lnet_nid_t dest_nid;
	lnet_nid_t src_nid;
	lnet_pid_t dest_pid;
	lnet_pid_t src_pid;
	__u32 type;		/* lnet_msg_type_t */
	__u32 payload_length;	/* payload data to follow */
	/*<------__u64 aligned------->*/
	union {
		lnet_ack_t ack;
		lnet_put_t put;
		lnet_get_t get;
		lnet_reply_t reply;
		lnet_hello_t hello;
	} msg;
} WIRE_ATTR lnet_hdr_t;

/*
 * A HELLO message contains a magic number and protocol version
 * code in the header's dest_nid, the peer's NID in the src_nid, and
 * LNET_MSG_HELLO in the type field. All other common fields are zero
 * (including payload_length; i.e. no payload).
 * This is for use by byte-stream LNDs (e.g. TCP/IP) to check the peer is
 * running the same protocol and to find out its NID. These LNDs should
 * exchange HELLO messages when a connection is first established. Individual
 * LNDs can put whatever else they fancy in lnet_hdr_t::msg.
 */
typedef struct {
	__u32 magic;		/* LNET_PROTO_TCP_MAGIC */
	__u16 version_major;	/* increment on incompatible change */
	__u16 version_minor;	/* increment on compatible change */
} WIRE_ATTR lnet_magicversion_t;

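/**
 * Usage sketch (illustrative only, not taken from this header): how a
 * byte-stream LND might fill the magic/version block it exchanges when a
 * connection is first established. Wire byte ordering and any additional
 * framing are LND-specific and omitted here.
 *
 * \code
 * lnet_magicversion_t hmv;
 *
 * hmv.magic         = LNET_PROTO_TCP_MAGIC;
 * hmv.version_major = LNET_PROTO_TCP_VERSION_MAJOR;
 * hmv.version_minor = LNET_PROTO_TCP_VERSION_MINOR;
 * \endcode
 */
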
/* PROTO MAGIC for LNDs */
#define LNET_PROTO_IB_MAGIC		0x0be91b91
#define LNET_PROTO_GNI_MAGIC		0xb00fbabe /* ask Kim */
#define LNET_PROTO_TCP_MAGIC		0xeebc0ded
#define LNET_PROTO_ACCEPTOR_MAGIC	0xacce7100
#define LNET_PROTO_PING_MAGIC		0x70696E67 /* 'ping' */

/* Placeholder for a future "unified" protocol across all LNDs */
/*
 * Current LNDs that receive a request with this magic will respond with a
 * "stub" reply using their current protocol
 */
#define LNET_PROTO_MAGIC		0x45726963 /* ! */

#define LNET_PROTO_TCP_VERSION_MAJOR	1
#define LNET_PROTO_TCP_VERSION_MINOR	0

/* Acceptor connection request */
typedef struct {
	__u32 acr_magic;	/* LNET_PROTO_ACCEPTOR_MAGIC */
	__u32 acr_version;	/* protocol version */
	__u64 acr_nid;		/* target NID */
} WIRE_ATTR lnet_acceptor_connreq_t;

#define LNET_PROTO_ACCEPTOR_VERSION	1

typedef struct {
	lnet_nid_t ns_nid;
	__u32 ns_status;
	__u32 ns_unused;
} WIRE_ATTR lnet_ni_status_t;

typedef struct {
	__u32 pi_magic;
	__u32 pi_features;
	lnet_pid_t pi_pid;
	__u32 pi_nnis;
	lnet_ni_status_t pi_ni[0];
} WIRE_ATTR lnet_ping_info_t;

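/**
 * Sizing sketch (illustrative only): pi_ni[] is a variable-length array
 * with pi_nnis entries, so a ping info buffer holding "nnis" interface
 * status slots occupies the space computed below ("nnis" is a
 * hypothetical variable).
 *
 * \code
 * size_t len = offsetof(lnet_ping_info_t, pi_ni[nnis]);
 * \endcode
 */
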
typedef struct lnet_counters {
	__u32 msgs_alloc;
	__u32 msgs_max;
	__u32 errors;
	__u32 send_count;
	__u32 recv_count;
	__u32 route_count;
	__u32 drop_count;
	__u64 send_length;
	__u64 recv_length;
	__u64 route_length;
	__u64 drop_length;
} WIRE_ATTR lnet_counters_t;

#define LNET_NI_STATUS_UP	0x15aac0de
#define LNET_NI_STATUS_DOWN	0xdeadface
#define LNET_NI_STATUS_INVALID	0x00000000

#define LNET_MAX_INTERFACES	16

/**
 * Objects maintained by the LNet are accessed through handles. Handle types
 * have names of the form lnet_handle_xx_t, where xx is one of the two letter
 * object type codes ('eq' for event queue, 'md' for memory descriptor, and
 * 'me' for match entry).
 * Each type of object is given a unique handle type to enhance type checking.
 * The type lnet_handle_any_t can be used when a generic handle is needed.
 * Every handle value can be converted into a value of type lnet_handle_any_t
 * without loss of information.
 */
typedef struct {
	__u64 cookie;
} lnet_handle_any_t;

typedef lnet_handle_any_t lnet_handle_eq_t;
typedef lnet_handle_any_t lnet_handle_md_t;
typedef lnet_handle_any_t lnet_handle_me_t;

#define LNET_WIRE_HANDLE_COOKIE_NONE	(-1)

/**
 * Invalidate handle \a h.
 */
static inline void LNetInvalidateHandle(lnet_handle_any_t *h)
{
	h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
}

/**
 * Compare handles \a h1 and \a h2.
 *
 * \return 1 if handles are equal, 0 otherwise.
 */
static inline int LNetHandleIsEqual(lnet_handle_any_t h1, lnet_handle_any_t h2)
{
	return h1.cookie == h2.cookie;
}

/**
 * Check whether handle \a h is invalid.
 *
 * \return 1 if handle is invalid, 0 if valid.
 */
static inline int LNetHandleIsInvalid(lnet_handle_any_t h)
{
	return h.cookie == LNET_WIRE_HANDLE_COOKIE_NONE;
}

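/**
 * Usage sketch (illustrative only): a handle is typically initialized to
 * the invalid value before any object is attached to it, and tested with
 * the predicates above before use.
 *
 * \code
 * lnet_handle_eq_t eqh;
 * int attached;
 *
 * LNetInvalidateHandle(&eqh);
 * attached = !LNetHandleIsInvalid(eqh);   // 0 until a real handle is set
 * \endcode
 */
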
/**
 * Global process ID.
 */
typedef struct {
	/** node id */
	lnet_nid_t nid;
	/** process id */
	lnet_pid_t pid;
} lnet_process_id_t;
/** @} lnet_addr */

/** \addtogroup lnet_me
 * @{
 */

/**
 * Specifies whether the match entry or memory descriptor should be unlinked
 * automatically (LNET_UNLINK) or not (LNET_RETAIN).
 */
typedef enum {
	LNET_RETAIN = 0,
	LNET_UNLINK
} lnet_unlink_t;

/**
 * Values of the type lnet_ins_pos_t are used to control where a new match
 * entry is inserted. The value LNET_INS_BEFORE is used to insert the new
 * entry before the current entry or before the head of the list. The value
 * LNET_INS_AFTER is used to insert the new entry after the current entry
 * or after the last item in the list.
 */
typedef enum {
	/** insert ME before current position or head of the list */
	LNET_INS_BEFORE,
	/** insert ME after current position or tail of the list */
	LNET_INS_AFTER,
	/** attach ME at tail of local CPU partition ME list */
	LNET_INS_LOCAL
} lnet_ins_pos_t;

/** @} lnet_me */

/** \addtogroup lnet_md
 * @{
 */

/**
 * Defines the visible parts of a memory descriptor. Values of this type
 * are used to initialize memory descriptors.
 */
typedef struct {
	/**
	 * Specify the memory region associated with the memory descriptor.
	 * If the options field has:
	 * - LNET_MD_KIOV bit set: The start field points to the starting
	 * address of an array of lnet_kiov_t and the length field specifies
	 * the number of entries in the array. The length can't be bigger
	 * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
	 * fragments that are not necessarily mapped in virtual memory.
	 * - LNET_MD_IOVEC bit set: The start field points to the starting
	 * address of an array of struct iovec and the length field specifies
	 * the number of entries in the array. The length can't be bigger
	 * than LNET_MAX_IOV. The struct iovec is used to describe fragments
	 * that have virtual addresses.
	 * - Otherwise: The memory region is contiguous. The start field
	 * specifies the starting address for the memory region and the
	 * length field specifies its length.
	 *
	 * When the memory region is fragmented, all fragments but the first
	 * one must start on a page boundary, and all but the last must end
	 * on a page boundary.
	 */
	void *start;
	unsigned int length;
	/**
	 * Specifies the maximum number of operations that can be performed
	 * on the memory descriptor. An operation is any action that could
	 * possibly generate an event. In the usual case, the threshold value
	 * is decremented for each operation on the MD. When the threshold
	 * drops to zero, the MD becomes inactive and does not respond to
	 * operations. A threshold value of LNET_MD_THRESH_INF indicates that
	 * there is no bound on the number of operations that may be applied
	 * to a MD.
	 */
	int threshold;
	/**
	 * Specifies the largest incoming request that the memory descriptor
	 * should respond to. When the unused portion of a MD (length -
	 * local offset) falls below this value, the MD becomes inactive and
	 * does not respond to further operations. This value is only used
	 * if the LNET_MD_MAX_SIZE option is set.
	 */
	int max_size;
	/**
	 * Specifies the behavior of the memory descriptor. A bitwise OR
	 * of the following values can be used:
	 * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD.
	 * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD.
	 * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory
	 * region is provided by the incoming request. By default, the
	 * offset is maintained locally. When maintained locally, the
	 * offset is incremented by the length of the request so that
	 * the next operation (PUT or GET) will access the next part of
	 * the memory region. Note that only one offset variable exists
	 * per memory descriptor. If both PUT and GET operations are
	 * performed on a memory descriptor, the offset is updated each time.
	 * - LNET_MD_TRUNCATE: The length provided in the incoming request can
	 * be reduced to match the memory available in the region (determined
	 * by subtracting the offset from the length of the memory region).
	 * By default, if the length in the incoming operation is greater
	 * than the amount of memory available, the operation is rejected.
	 * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for
	 * incoming PUT operations, even if requested. By default,
	 * acknowledgments are sent for PUT operations that request an
	 * acknowledgment. Acknowledgments are never sent for GET operations.
	 * The data sent in the REPLY serves as an implicit acknowledgment.
	 * - LNET_MD_KIOV: The start and length fields specify an array of
	 * lnet_kiov_t.
	 * - LNET_MD_IOVEC: The start and length fields specify an array of
	 * struct iovec.
	 * - LNET_MD_MAX_SIZE: The max_size field is valid.
	 *
	 * Note:
	 * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather
	 * capability for memory descriptors. They can't both be set.
	 * - When LNET_MD_MAX_SIZE is set, the total length of the memory
	 * region (i.e. sum of all fragment lengths) must not be less than
	 * \a max_size.
	 */
	unsigned int options;
	/**
	 * A user-specified value that is associated with the memory
	 * descriptor. The value does not need to be a pointer, but must fit
	 * in the space used by a pointer. This value is recorded in events
	 * associated with operations on this MD.
	 */
	void *user_ptr;
	/**
	 * A handle for the event queue used to log the operations performed on
	 * the memory region. If this argument is a NULL handle (i.e. nullified
	 * by LNetInvalidateHandle()), operations performed on this memory
	 * descriptor are not logged.
	 */
	lnet_handle_eq_t eq_handle;
} lnet_md_t;

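/**
 * Initialization sketch (illustrative only, not taken from this header):
 * describing a contiguous buffer that accepts a single PUT and then
 * becomes inactive. "buffer" and "buffer_size" are a hypothetical memory
 * region, and "eqh" a hypothetical event queue handle (or an invalidated
 * handle if no event logging is wanted).
 *
 * \code
 * lnet_md_t md;
 *
 * md.start     = buffer;
 * md.length    = buffer_size;
 * md.threshold = 1;
 * md.max_size  = 0;
 * md.options   = LNET_MD_OP_PUT;
 * md.user_ptr  = NULL;
 * md.eq_handle = eqh;
 * \endcode
 */
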
/*
 * Max Transfer Unit (minimum supported everywhere).
 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
 * these limits are system wide and not interface-local.
 */
#define LNET_MTU_BITS	20
#define LNET_MTU	(1 << LNET_MTU_BITS)

/** limit on the number of fragments in discontiguous MDs */
#define LNET_MAX_IOV	256

/**
 * Options for the MD structure. See lnet_md_t::options.
 */
#define LNET_MD_OP_PUT		(1 << 0)
/** See lnet_md_t::options. */
#define LNET_MD_OP_GET		(1 << 1)
/** See lnet_md_t::options. */
#define LNET_MD_MANAGE_REMOTE	(1 << 2)
/* unused			(1 << 3) */
/** See lnet_md_t::options. */
#define LNET_MD_TRUNCATE	(1 << 4)
/** See lnet_md_t::options. */
#define LNET_MD_ACK_DISABLE	(1 << 5)
/** See lnet_md_t::options. */
#define LNET_MD_IOVEC		(1 << 6)
/** See lnet_md_t::options. */
#define LNET_MD_MAX_SIZE	(1 << 7)
/** See lnet_md_t::options. */
#define LNET_MD_KIOV		(1 << 8)

/* For compatibility with Cray Portals */
#define LNET_MD_PHYS		0

/** Infinite threshold on MD operations. See lnet_md_t::threshold */
#define LNET_MD_THRESH_INF	(-1)

/* NB lustre portals uses struct iovec internally! */
typedef struct iovec lnet_md_iovec_t;

/**
 * A page-based fragment of a MD.
 */
typedef struct {
	/** Pointer to the page where the fragment resides */
	struct page *kiov_page;
	/** Length in bytes of the fragment */
	unsigned int kiov_len;
	/**
	 * Starting offset of the fragment within the page. Note that the
	 * end of the fragment must not pass the end of the page; i.e.,
	 * kiov_len + kiov_offset <= PAGE_SIZE.
	 */
	unsigned int kiov_offset;
} lnet_kiov_t;
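
/**
 * Usage sketch (illustrative only): describing a fragment that covers
 * the first half of a page; kiov_offset + kiov_len stays within
 * PAGE_SIZE as required above. "page" is a hypothetical struct page
 * pointer obtained elsewhere.
 *
 * \code
 * lnet_kiov_t kiov;
 *
 * kiov.kiov_page   = page;
 * kiov.kiov_offset = 0;
 * kiov.kiov_len    = PAGE_SIZE / 2;
 * \endcode
 */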
/** @} lnet_md */

/** \addtogroup lnet_eq
 * @{
 */

/**
 * Six types of events can be logged in an event queue.
 */
typedef enum {
	/** An incoming GET operation has completed on the MD. */
	LNET_EVENT_GET = 1,
	/**
	 * An incoming PUT operation has completed on the MD. The
	 * underlying layers will not alter the memory (on behalf of this
	 * operation) once this event has been logged.
	 */
	LNET_EVENT_PUT,
	/**
	 * A REPLY operation has completed. This event is logged after the
	 * data (if any) from the REPLY has been written into the MD.
	 */
	LNET_EVENT_REPLY,
	/** An acknowledgment has been received. */
	LNET_EVENT_ACK,
	/**
	 * An outgoing send (PUT or GET) operation has completed. This event
	 * is logged after the entire buffer has been sent and it is safe for
	 * the caller to reuse the buffer.
	 *
	 * Note:
	 * - LNET_EVENT_SEND doesn't guarantee message delivery. It can
	 * happen even when the message has not yet been put out on the wire.
	 * - It's unsafe to assume that in an outgoing GET operation
	 * the LNET_EVENT_SEND event would happen before the
	 * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and
	 * LNET_EVENT_ACK events in an outgoing PUT operation.
	 */
	LNET_EVENT_SEND,
	/**
	 * A MD has been unlinked. Note that LNetMDUnlink() does not
	 * necessarily trigger an LNET_EVENT_UNLINK event.
	 * \see LNetMDUnlink
	 */
	LNET_EVENT_UNLINK,
} lnet_event_kind_t;

#define LNET_SEQ_BASETYPE	long
typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
#define LNET_SEQ_GT(a, b)	(((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)

/**
 * Information about an event on a MD.
 */
typedef struct {
	/** The identifier (nid, pid) of the target. */
	lnet_process_id_t target;
	/** The identifier (nid, pid) of the initiator. */
	lnet_process_id_t initiator;
	/**
	 * The NID of the immediate sender. If the request has been forwarded
	 * by routers, this is the NID of the last hop; otherwise it's the
	 * same as the initiator.
	 */
	lnet_nid_t sender;
	/** Indicates the type of the event. */
	lnet_event_kind_t type;
	/** The portal table index specified in the request */
	unsigned int pt_index;
	/** A copy of the match bits specified in the request. */
	__u64 match_bits;
	/** The length (in bytes) specified in the request. */
	unsigned int rlength;
	/**
	 * The length (in bytes) of the data that was manipulated by the
	 * operation. For truncated operations, the manipulated length will be
	 * the number of bytes specified by the MD (possibly with an offset,
	 * see lnet_md_t). For all other operations, the manipulated length
	 * will be the length of the requested operation, i.e. rlength.
	 */
	unsigned int mlength;
	/**
	 * The handle to the MD associated with the event. The handle may be
	 * invalid if the MD has been unlinked.
	 */
	lnet_handle_md_t md_handle;
	/**
	 * A snapshot of the state of the MD immediately after the event has
	 * been processed. In particular, the threshold field in md will
	 * reflect the value of the threshold after the operation occurred.
	 */
	lnet_md_t md;
	/**
	 * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
	 * \see LNetPut
	 */
	__u64 hdr_data;
	/**
	 * Indicates the completion status of the operation. It's 0 for
	 * successful operations, otherwise it's an error code.
	 */
	int status;
	/**
	 * Indicates whether the MD has been unlinked. Note that:
	 * - An event with unlinked set is the last event on the MD.
	 * - This field is also set for an explicit LNET_EVENT_UNLINK event.
	 * \see LNetMDUnlink
	 */
	int unlinked;
	/**
	 * The displacement (in bytes) into the memory region that the
	 * operation used. The offset can be determined by the operation for
	 * a remote managed MD or by the local MD.
	 * \see lnet_md_t::options
	 */
	unsigned int offset;
	/**
	 * The sequence number for this event. Sequence numbers are unique
	 * to each event.
	 */
	volatile lnet_seq_t sequence;
} lnet_event_t;

/**
 * Event queue handler function type.
 *
 * The EQ handler runs for each event that is deposited into the EQ. The
 * handler is supplied with a pointer to the event that triggered the
 * handler invocation.
 *
 * The handler must not block, must be reentrant, and must not call any LNet
 * API functions. It should return as quickly as possible.
 */
typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
#define LNET_EQ_HANDLER_NONE NULL
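
/**
 * Handler sketch (illustrative only, not taken from this header): a
 * minimal handler that records PUT completion through the MD's user_ptr.
 * It does not block and does not call back into the LNet API, as
 * required above. The wiring of user_ptr to an int flag is an assumption
 * made for this example.
 *
 * \code
 * static void sample_eq_handler(lnet_event_t *event)
 * {
 *	int *done = event->md.user_ptr;
 *
 *	if (event->type == LNET_EVENT_PUT && !event->status)
 *		*done = 1;
 * }
 * \endcode
 */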
/** @} lnet_eq */

/** \addtogroup lnet_data
 * @{
 */

/**
 * Specify whether an acknowledgment should be sent by the target when the
 * PUT operation completes (i.e., when the data has been written to a MD of
 * the target process).
 *
 * \see lnet_md_t::options for the discussion on LNET_MD_ACK_DISABLE by which
 * acknowledgments can be disabled for a MD.
 */
typedef enum {
	/** Request an acknowledgment */
	LNET_ACK_REQ,
	/** Request that no acknowledgment should be generated. */
	LNET_NOACK_REQ
} lnet_ack_req_t;
/** @} lnet_data */

/** @} lnet */
#endif /* __LNET_TYPES_H__ */