/*
 * drivers/staging/lustre/include/linux/lnet/types.h
 * (from staging: lustre: fix camel case for LNetInit and LNetFini)
 */
1/*
2 * GPL HEADER START
3 *
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
19 *
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
22 * have any questions.
23 *
24 * GPL HEADER END
25 */
26/*
27 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
29 *
30 * Copyright (c) 2012, Intel Corporation.
31 */
32/*
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
35 */
36
37#ifndef __LNET_TYPES_H__
38#define __LNET_TYPES_H__
39
db18b8e9
JS
40#include <linux/types.h>
41
d7e09d03
PT
42/** \addtogroup lnet
43 * @{ */
44
d7e09d03
PT
45/** \addtogroup lnet_addr
46 * @{ */
47
/** Portal reserved for LNet's own use.
 * \see lustre/include/lustre/lustre_idl.h for Lustre portal assignments.
 */
#define LNET_RESERVED_PORTAL 0

/**
 * Address of an end-point in an LNet network.
 *
 * A node can have multiple end-points and hence multiple addresses.
 * An LNet network can be a simple network (e.g. tcp0) or a network of
 * LNet networks connected by LNet routers. Therefore an end-point address
 * has two parts: network ID (upper 32 bits), and address within a network
 * (lower 32 bits).
 *
 * \see LNET_NIDNET, LNET_NIDADDR, and LNET_MKNID.
 */
typedef __u64 lnet_nid_t;
/**
 * ID of a process in a node. Shortened as PID to distinguish from
 * lnet_process_id_t, the global process ID.
 */
typedef __u32 lnet_pid_t;

/** wildcard NID that matches any end-point address */
#define LNET_NID_ANY ((lnet_nid_t) -1)
/** wildcard PID that matches any lnet_pid_t */
#define LNET_PID_ANY ((lnet_pid_t) -1)

#define LNET_PID_RESERVED 0xf0000000 /* reserved bits in PID */
#define LNET_PID_USERFLAG 0x80000000 /* set in userspace peers */
/* well-known PID used by Lustre peers */
#define LNET_PID_LUSTRE 12345

/* timeout value meaning "wait indefinitely" */
#define LNET_TIME_FOREVER (-1)
80
bbf00c3d
JS
81/* how an LNET NID encodes net:address */
82/** extract the address part of an lnet_nid_t */
83
84static inline __u32 LNET_NIDADDR(lnet_nid_t nid)
85{
86 return nid & 0xffffffff;
87}
88
89static inline __u32 LNET_NIDNET(lnet_nid_t nid)
90{
91 return (nid >> 32) & 0xffffffff;
92}
93
94static inline lnet_nid_t LNET_MKNID(__u32 net, __u32 addr)
95{
96 return (((__u64)net) << 32) | addr;
97}
98
/** extract the instance number (low 16 bits) of a network ID */
static inline __u32 LNET_NETNUM(__u32 net)
{
	return net & 0xffffU;
}
103
/** extract the LND type code (high 16 bits) of a network ID */
static inline __u32 LNET_NETTYP(__u32 net)
{
	return net >> 16;
}
108
/** compose a network ID from an LND type code and an instance number */
static inline __u32 LNET_MKNET(__u32 type, __u32 num)
{
	__u32 net = type << 16;

	return net | num;
}
113
db18b8e9
JS
/* all on-the-wire structs below are packed so their layout is fixed */
#define WIRE_ATTR __packed

/* Packed version of lnet_process_id_t to transfer via network */
typedef struct {
	/* node id / process id */
	lnet_nid_t nid;
	lnet_pid_t pid;
} WIRE_ATTR lnet_process_id_packed_t;

/* The wire handle's interface cookie only matches one network interface in
 * one epoch (i.e. new cookie when the interface restarts or the node
 * reboots). The object cookie only matches one object on that interface
 * during that object's lifetime (i.e. no cookie re-use). */
typedef struct {
	__u64 wh_interface_cookie;
	__u64 wh_object_cookie;
} WIRE_ATTR lnet_handle_wire_t;

/* message types carried in lnet_hdr_t::type */
typedef enum {
	LNET_MSG_ACK = 0,
	LNET_MSG_PUT,
	LNET_MSG_GET,
	LNET_MSG_REPLY,
	LNET_MSG_HELLO,
} lnet_msg_type_t;
139
/* The variant fields of the portals message header are aligned on an 8
 * byte boundary in the message header. Note that all types used in these
 * wire structs MUST be fixed size and the smaller types are placed at the
 * end. */

/* ACK variant: acknowledges a PUT into the MD identified by dst_wmd */
typedef struct lnet_ack {
	lnet_handle_wire_t dst_wmd;
	__u64 match_bits;
	__u32 mlength;
} WIRE_ATTR lnet_ack_t;

/* PUT variant: ack_wmd identifies the sender's MD that should receive
 * the ACK, if one was requested */
typedef struct lnet_put {
	lnet_handle_wire_t ack_wmd;
	__u64 match_bits;
	__u64 hdr_data;
	__u32 ptl_index;
	__u32 offset;
} WIRE_ATTR lnet_put_t;

/* GET variant: the REPLY carrying the data is directed at return_wmd */
typedef struct lnet_get {
	lnet_handle_wire_t return_wmd;
	__u64 match_bits;
	__u32 ptl_index;
	__u32 src_offset;
	__u32 sink_length;
} WIRE_ATTR lnet_get_t;

/* REPLY variant: carries data requested by a GET back to dst_wmd */
typedef struct lnet_reply {
	lnet_handle_wire_t dst_wmd;
} WIRE_ATTR lnet_reply_t;

/* HELLO variant: connection-establishment handshake */
typedef struct lnet_hello {
	__u64 incarnation;
	__u32 type;
} WIRE_ATTR lnet_hello_t;

/* common portals message header; msg holds the type-specific variant */
typedef struct {
	lnet_nid_t dest_nid;
	lnet_nid_t src_nid;
	lnet_pid_t dest_pid;
	lnet_pid_t src_pid;
	__u32 type; /* lnet_msg_type_t */
	__u32 payload_length; /* payload data to follow */
	/*<------__u64 aligned------->*/
	union {
		lnet_ack_t ack;
		lnet_put_t put;
		lnet_get_t get;
		lnet_reply_t reply;
		lnet_hello_t hello;
	} msg;
} WIRE_ATTR lnet_hdr_t;
191
/* A HELLO message contains a magic number and protocol version
 * code in the header's dest_nid, the peer's NID in the src_nid, and
 * LNET_MSG_HELLO in the type field. All other common fields are zero
 * (including payload_size; i.e. no payload).
 * This is for use by byte-stream LNDs (e.g. TCP/IP) to check the peer is
 * running the same protocol and to find out its NID. These LNDs should
 * exchange HELLO messages when a connection is first established. Individual
 * LNDs can put whatever else they fancy in lnet_hdr_t::msg.
 */
typedef struct {
	__u32 magic; /* LNET_PROTO_TCP_MAGIC */
	__u16 version_major; /* increment on incompatible change */
	__u16 version_minor; /* increment on compatible change */
} WIRE_ATTR lnet_magicversion_t;

/* PROTO MAGIC for LNDs */
#define LNET_PROTO_IB_MAGIC 0x0be91b91
#define LNET_PROTO_GNI_MAGIC 0xb00fbabe /* ask Kim */
#define LNET_PROTO_TCP_MAGIC 0xeebc0ded
#define LNET_PROTO_ACCEPTOR_MAGIC 0xacce7100
#define LNET_PROTO_PING_MAGIC 0x70696E67 /* 'ping' */

/* Placeholder for a future "unified" protocol across all LNDs */
/* Current LNDs that receive a request with this magic will respond with a
 * "stub" reply using their current protocol */
#define LNET_PROTO_MAGIC 0x45726963 /* 'Eric' in ASCII */

#define LNET_PROTO_TCP_VERSION_MAJOR 1
#define LNET_PROTO_TCP_VERSION_MINOR 0
221
/* Acceptor connection request */
typedef struct {
	__u32 acr_magic; /* LNET_PROTO_ACCEPTOR_MAGIC */
	__u32 acr_version; /* protocol version (LNET_PROTO_ACCEPTOR_VERSION) */
	__u64 acr_nid; /* target NID */
} WIRE_ATTR lnet_acceptor_connreq_t;

#define LNET_PROTO_ACCEPTOR_VERSION 1
230
/* per-network-interface status reported in lnet_ping_info_t;
 * ns_status takes the LNET_NI_STATUS_* values defined below */
typedef struct {
	lnet_nid_t ns_nid;
	__u32 ns_status;
	__u32 ns_unused;
} WIRE_ATTR lnet_ni_status_t;
236
237typedef struct {
238 __u32 pi_magic;
239 __u32 pi_features;
240 lnet_pid_t pi_pid;
241 __u32 pi_nnis;
242 lnet_ni_status_t pi_ni[0];
243} WIRE_ATTR lnet_ping_info_t;
244
/* LNet traffic counters; packed (WIRE_ATTR) so the layout is fixed.
 * The *_count fields count messages; the *_length fields are presumably
 * the corresponding byte totals — confirm against the code updating them. */
typedef struct lnet_counters {
	__u32 msgs_alloc;
	__u32 msgs_max;
	__u32 errors;
	__u32 send_count;
	__u32 recv_count;
	__u32 route_count;
	__u32 drop_count;
	__u64 send_length;
	__u64 recv_length;
	__u64 route_length;
	__u64 drop_length;
} WIRE_ATTR lnet_counters_t;

/* values for lnet_ni_status_t::ns_status */
#define LNET_NI_STATUS_UP 0x15aac0de
#define LNET_NI_STATUS_DOWN 0xdeadface
#define LNET_NI_STATUS_INVALID 0x00000000

#define LNET_MAX_INTERFACES 16
264
/*
 * Objects maintained by the LNet are accessed through handles. Handle types
 * have names of the form lnet_handle_xx_t, where xx is one of the two letter
 * object type codes ('eq' for event queue, 'md' for memory descriptor, and
 * 'me' for match entry).
 * Each type of object is given a unique handle type to enhance type checking.
 * The type lnet_handle_any_t can be used when a generic handle is needed.
 * Every handle value can be converted into a value of type lnet_handle_any_t
 * without loss of information.
 */
typedef struct {
	__u64 cookie;
} lnet_handle_any_t;

/* type-specific aliases of the generic handle (see comment above) */
typedef lnet_handle_any_t lnet_handle_eq_t;
typedef lnet_handle_any_t lnet_handle_md_t;
typedef lnet_handle_any_t lnet_handle_me_t;

/* cookie value marking a handle as invalid; see LNetInvalidateHandle() */
#define LNET_WIRE_HANDLE_COOKIE_NONE (-1)
284
/**
 * Invalidate handle \a h, i.e. make it refer to no object.
 *
 * \see LNetHandleIsInvalid
 */
static inline void LNetInvalidateHandle(lnet_handle_any_t *h)
{
	h->cookie = LNET_WIRE_HANDLE_COOKIE_NONE;
}
292
293/**
294 * Compare handles \a h1 and \a h2.
295 *
296 * \return 1 if handles are equal, 0 if otherwise.
297 */
568414f1 298static inline int LNetHandleIsEqual(lnet_handle_any_t h1, lnet_handle_any_t h2)
d7e09d03 299{
e4422943 300 return h1.cookie == h2.cookie;
d7e09d03
PT
301}
302
303/**
304 * Check whether handle \a h is invalid.
305 *
306 * \return 1 if handle is invalid, 0 if valid.
307 */
308static inline int LNetHandleIsInvalid(lnet_handle_any_t h)
309{
e4422943 310 return LNET_WIRE_HANDLE_COOKIE_NONE == h.cookie;
d7e09d03
PT
311}
312
/**
 * Global process ID.
 */
typedef struct {
	/** node id */
	lnet_nid_t nid;
	/** process id */
	lnet_pid_t pid;
} lnet_process_id_t;
/** @} lnet_addr */

/** \addtogroup lnet_me
 * @{ */

/**
 * Specifies whether the match entry or memory descriptor should be unlinked
 * automatically (LNET_UNLINK) or not (LNET_RETAIN).
 */
typedef enum {
	LNET_RETAIN = 0,
	LNET_UNLINK
} lnet_unlink_t;
335
/**
 * Values of the type lnet_ins_pos_t are used to control where a new match
 * entry is inserted. The value LNET_INS_BEFORE is used to insert the new
 * entry before the current entry or before the head of the list. The value
 * LNET_INS_AFTER is used to insert the new entry after the current entry
 * or after the last item in the list.
 */
typedef enum {
	/** insert ME before current position or head of the list */
	LNET_INS_BEFORE,
	/** insert ME after current position or tail of the list */
	LNET_INS_AFTER,
	/** attach ME at tail of local CPU partition ME list */
	LNET_INS_LOCAL
} lnet_ins_pos_t;
351
352/** @} lnet_me */
353
354/** \addtogroup lnet_md
355 * @{ */
356
/**
 * Defines the visible parts of a memory descriptor. Values of this type
 * are used to initialize memory descriptors.
 */
typedef struct {
	/**
	 * Specify the memory region associated with the memory descriptor.
	 * If the options field has:
	 * - LNET_MD_KIOV bit set: The start field points to the starting
	 * address of an array of lnet_kiov_t and the length field specifies
	 * the number of entries in the array. The length can't be bigger
	 * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
	 * fragments that are not necessarily mapped in virtual memory.
	 * - LNET_MD_IOVEC bit set: The start field points to the starting
	 * address of an array of struct iovec and the length field specifies
	 * the number of entries in the array. The length can't be bigger
	 * than LNET_MAX_IOV. The struct iovec is used to describe fragments
	 * that have virtual addresses.
	 * - Otherwise: The memory region is contiguous. The start field
	 * specifies the starting address for the memory region and the
	 * length field specifies its length.
	 *
	 * When the memory region is fragmented, all fragments but the first
	 * one must start on page boundary, and all but the last must end on
	 * page boundary.
	 */
	void *start;
	unsigned int length;
	/**
	 * Specifies the maximum number of operations that can be performed
	 * on the memory descriptor. An operation is any action that could
	 * possibly generate an event. In the usual case, the threshold value
	 * is decremented for each operation on the MD. When the threshold
	 * drops to zero, the MD becomes inactive and does not respond to
	 * operations. A threshold value of LNET_MD_THRESH_INF indicates that
	 * there is no bound on the number of operations that may be applied
	 * to a MD.
	 */
	int threshold;
	/**
	 * Specifies the largest incoming request that the memory descriptor
	 * should respond to. When the unused portion of a MD (length -
	 * local offset) falls below this value, the MD becomes inactive and
	 * does not respond to further operations. This value is only used
	 * if the LNET_MD_MAX_SIZE option is set.
	 */
	int max_size;
	/**
	 * Specifies the behavior of the memory descriptor. A bitwise OR
	 * of the following values can be used:
	 * - LNET_MD_OP_PUT: The LNet PUT operation is allowed on this MD.
	 * - LNET_MD_OP_GET: The LNet GET operation is allowed on this MD.
	 * - LNET_MD_MANAGE_REMOTE: The offset used in accessing the memory
	 * region is provided by the incoming request. By default, the
	 * offset is maintained locally. When maintained locally, the
	 * offset is incremented by the length of the request so that
	 * the next operation (PUT or GET) will access the next part of
	 * the memory region. Note that only one offset variable exists
	 * per memory descriptor. If both PUT and GET operations are
	 * performed on a memory descriptor, the offset is updated each time.
	 * - LNET_MD_TRUNCATE: The length provided in the incoming request can
	 * be reduced to match the memory available in the region (determined
	 * by subtracting the offset from the length of the memory region).
	 * By default, if the length in the incoming operation is greater
	 * than the amount of memory available, the operation is rejected.
	 * - LNET_MD_ACK_DISABLE: An acknowledgment should not be sent for
	 * incoming PUT operations, even if requested. By default,
	 * acknowledgments are sent for PUT operations that request an
	 * acknowledgment. Acknowledgments are never sent for GET operations.
	 * The data sent in the REPLY serves as an implicit acknowledgment.
	 * - LNET_MD_KIOV: The start and length fields specify an array of
	 * lnet_kiov_t.
	 * - LNET_MD_IOVEC: The start and length fields specify an array of
	 * struct iovec.
	 * - LNET_MD_MAX_SIZE: The max_size field is valid.
	 *
	 * Note:
	 * - LNET_MD_KIOV or LNET_MD_IOVEC allows for a scatter/gather
	 * capability for memory descriptors. They can't be both set.
	 * - When LNET_MD_MAX_SIZE is set, the total length of the memory
	 * region (i.e. sum of all fragment lengths) must not be less than
	 * \a max_size.
	 */
	unsigned int options;
	/**
	 * A user-specified value that is associated with the memory
	 * descriptor. The value does not need to be a pointer, but must fit
	 * in the space used by a pointer. This value is recorded in events
	 * associated with operations on this MD.
	 */
	void *user_ptr;
	/**
	 * A handle for the event queue used to log the operations performed on
	 * the memory region. If this argument is a NULL handle (i.e. nullified
	 * by LNetInvalidateHandle()), operations performed on this memory
	 * descriptor are not logged.
	 */
	lnet_handle_eq_t eq_handle;
} lnet_md_t;
456
/* Max Transfer Unit (minimum supported everywhere).
 * CAVEAT EMPTOR, with multinet (i.e. routers forwarding between networks)
 * these limits are system wide and not interface-local. */
#define LNET_MTU_BITS 20
#define LNET_MTU (1 << LNET_MTU_BITS)

/** limit on the number of fragments in discontiguous MDs */
#define LNET_MAX_IOV 256

/**
 * Options for the MD structure. See lnet_md_t::options.
 */
#define LNET_MD_OP_PUT (1 << 0)
/** See lnet_md_t::options. */
#define LNET_MD_OP_GET (1 << 1)
/** See lnet_md_t::options. */
#define LNET_MD_MANAGE_REMOTE (1 << 2)
/* unused (1 << 3) */
/** See lnet_md_t::options. */
#define LNET_MD_TRUNCATE (1 << 4)
/** See lnet_md_t::options. */
#define LNET_MD_ACK_DISABLE (1 << 5)
/** See lnet_md_t::options. */
#define LNET_MD_IOVEC (1 << 6)
/** See lnet_md_t::options. */
#define LNET_MD_MAX_SIZE (1 << 7)
/** See lnet_md_t::options. */
#define LNET_MD_KIOV (1 << 8)

/* For compatibility with Cray Portals */
#define LNET_MD_PHYS 0

/** Infinite threshold on MD operations. See lnet_md_t::threshold */
#define LNET_MD_THRESH_INF (-1)

/* NB lustre portals uses struct iovec internally! */
typedef struct iovec lnet_md_iovec_t;

/**
 * A page-based fragment of a MD.
 */
typedef struct {
	/** Pointer to the page where the fragment resides */
	struct page *kiov_page;
	/** Length in bytes of the fragment */
	unsigned int kiov_len;
	/**
	 * Starting offset of the fragment within the page. Note that the
	 * end of the fragment must not pass the end of the page; i.e.,
	 * kiov_len + kiov_offset <= PAGE_CACHE_SIZE.
	 */
	unsigned int kiov_offset;
} lnet_kiov_t;
510/** @} lnet_md */
511
512/** \addtogroup lnet_eq
513 * @{ */
514
/**
 * Six types of events can be logged in an event queue.
 */
typedef enum {
	/** An incoming GET operation has completed on the MD. */
	LNET_EVENT_GET = 1,
	/**
	 * An incoming PUT operation has completed on the MD. The
	 * underlying layers will not alter the memory (on behalf of this
	 * operation) once this event has been logged.
	 */
	LNET_EVENT_PUT,
	/**
	 * A REPLY operation has completed. This event is logged after the
	 * data (if any) from the REPLY has been written into the MD.
	 */
	LNET_EVENT_REPLY,
	/** An acknowledgment has been received. */
	LNET_EVENT_ACK,
	/**
	 * An outgoing send (PUT or GET) operation has completed. This event
	 * is logged after the entire buffer has been sent and it is safe for
	 * the caller to reuse the buffer.
	 *
	 * Note:
	 * - The LNET_EVENT_SEND doesn't guarantee message delivery. It can
	 * happen even when the message has not yet been put out on wire.
	 * - It's unsafe to assume that in an outgoing GET operation
	 * the LNET_EVENT_SEND event would happen before the
	 * LNET_EVENT_REPLY event. The same holds for LNET_EVENT_SEND and
	 * LNET_EVENT_ACK events in an outgoing PUT operation.
	 */
	LNET_EVENT_SEND,
	/**
	 * A MD has been unlinked. Note that LNetMDUnlink() does not
	 * necessarily trigger an LNET_EVENT_UNLINK event.
	 * \see LNetMDUnlink
	 */
	LNET_EVENT_UNLINK,
} lnet_event_kind_t;

#define LNET_SEQ_BASETYPE long
typedef unsigned LNET_SEQ_BASETYPE lnet_seq_t;
/* "greater than" comparison that stays correct across sequence-number
 * wraparound (signed difference test) */
#define LNET_SEQ_GT(a, b) (((signed LNET_SEQ_BASETYPE)((a) - (b))) > 0)
d7e09d03
PT
/**
 * Information about an event on a MD.
 */
typedef struct {
	/** The identifier (nid, pid) of the target. */
	lnet_process_id_t target;
	/** The identifier (nid, pid) of the initiator. */
	lnet_process_id_t initiator;
	/**
	 * The NID of the immediate sender. If the request has been forwarded
	 * by routers, this is the NID of the last hop; otherwise it's the
	 * same as the initiator.
	 */
	lnet_nid_t sender;
	/** Indicates the type of the event. */
	lnet_event_kind_t type;
	/** The portal table index specified in the request */
	unsigned int pt_index;
	/** A copy of the match bits specified in the request. */
	__u64 match_bits;
	/** The length (in bytes) specified in the request. */
	unsigned int rlength;
	/**
	 * The length (in bytes) of the data that was manipulated by the
	 * operation. For truncated operations, the manipulated length will be
	 * the number of bytes specified by the MD (possibly with an offset,
	 * see lnet_md_t). For all other operations, the manipulated length
	 * will be the length of the requested operation, i.e. rlength.
	 */
	unsigned int mlength;
	/**
	 * The handle to the MD associated with the event. The handle may be
	 * invalid if the MD has been unlinked.
	 */
	lnet_handle_md_t md_handle;
	/**
	 * A snapshot of the state of the MD immediately after the event has
	 * been processed. In particular, the threshold field in md will
	 * reflect the value of the threshold after the operation occurred.
	 */
	lnet_md_t md;
	/**
	 * 64 bits of out-of-band user data. Only valid for LNET_EVENT_PUT.
	 * \see LNetPut
	 */
	__u64 hdr_data;
	/**
	 * Indicates the completion status of the operation. It's 0 for
	 * successful operations, otherwise it's an error code.
	 */
	int status;
	/**
	 * Indicates whether the MD has been unlinked. Note that:
	 * - An event with unlinked set is the last event on the MD.
	 * - This field is also set for an explicit LNET_EVENT_UNLINK event.
	 * \see LNetMDUnlink
	 */
	int unlinked;
	/**
	 * The displacement (in bytes) into the memory region that the
	 * operation used. The offset can be determined by the operation for
	 * a remote managed MD or by the local MD.
	 * \see lnet_md_t::options
	 */
	unsigned int offset;
	/**
	 * The sequence number for this event. Sequence numbers are unique
	 * to each event.
	 * NOTE(review): volatile alone does not provide cross-thread
	 * ordering guarantees — presumably the EQ code supplies the
	 * necessary synchronization; confirm at the use sites.
	 */
	volatile lnet_seq_t sequence;
} lnet_event_t;
d7e09d03
PT
631
/**
 * Event queue handler function type.
 *
 * The EQ handler runs for each event that is deposited into the EQ. The
 * handler is supplied with a pointer to the event that triggered the
 * handler invocation.
 *
 * The handler must not block, must be reentrant, and must not call any LNet
 * API functions. It should return as quickly as possible.
 */
typedef void (*lnet_eq_handler_t)(lnet_event_t *event);
/* pass this when an event queue needs no handler */
#define LNET_EQ_HANDLER_NONE NULL
/** @} lnet_eq */
645
/** \addtogroup lnet_data
 * @{ */

/**
 * Specify whether an acknowledgment should be sent by target when the PUT
 * operation completes (i.e., when the data has been written to a MD of the
 * target process).
 *
 * \see lnet_md_t::options for the discussion on LNET_MD_ACK_DISABLE by which
 * acknowledgments can be disabled for a MD.
 */
typedef enum {
	/** Request an acknowledgment */
	LNET_ACK_REQ,
	/** Request that no acknowledgment should be generated. */
	LNET_NOACK_REQ
} lnet_ack_req_t;
/** @} lnet_data */
664
665/** @} lnet */
666#endif