/*
  drbd_int.h

  This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

  Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
  Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
  Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

  drbd is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2, or (at your option)
  any later version.

  drbd is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with drbd; see the file COPYING.  If not, write to
  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

*/

#ifndef _DRBD_INT_H
#define _DRBD_INT_H

#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/version.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/ratelimit.h>
#include <linux/tcp.h>
#include <linux/mutex.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <net/tcp.h>
#include <linux/lru_cache.h>

#ifdef __CHECKER__
# define __protected_by(x)       __attribute__((require_context(x,1,999,"rdwr")))
# define __protected_read_by(x)  __attribute__((require_context(x,1,999,"read")))
# define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
# define __must_hold(x)          __attribute__((context(x,1,1), require_context(x,1,999,"call")))
#else
# define __protected_by(x)
# define __protected_read_by(x)
# define __protected_write_by(x)
# define __must_hold(x)
#endif

#define __no_warn(lock, stmt) do { __acquire(lock); stmt; __release(lock); } while (0)

/* module parameters, defined in drbd_main.c */
extern unsigned int minor_count;
extern int disable_sendpage;
extern int allow_oos;
extern unsigned int cn_idx;

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern int enable_faults;
extern int fault_rate;
extern int fault_devs;
#endif

extern char usermode_helper[];


#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif

/* I don't remember why XCPU ...
 * This is used to wake the asender,
 * and to interrupt the sending task
 * on disconnect.
 */
#define DRBD_SIG SIGXCPU

/* This is used to stop/restart our threads.
 * Cannot use SIGTERM nor SIGKILL, since these
 * are sent out by init on runlevel changes.
 * I chose SIGHUP for now.
 */
#define DRBD_SIGKILL SIGHUP

/* All EEs on the free list should have ID_VACANT (== 0)
 * freshly allocated EEs get !ID_VACANT (== 1)
 * so if it says "cannot dereference null pointer at address 0x00000001",
 * it is most likely one of these :( */

#define ID_IN_SYNC      (4711ULL)
#define ID_OUT_OF_SYNC  (4712ULL)

#define ID_SYNCER (-1ULL)
#define ID_VACANT 0
#define is_syncer_block_id(id) ((id) == ID_SYNCER)

struct drbd_conf;


/* to shorten dev_warn(DEV, "msg"); and related statements */
#define DEV (disk_to_dev(mdev->vdisk))

#define D_ASSERT(exp)	if (!(exp)) \
	 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)

#define ERR_IF(exp) if (({				\
	int _b = (exp) != 0;				\
	if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",	\
		__func__, #exp, __FILE__, __LINE__);	\
	 _b;						\
	}))

/* Defines to control fault insertion */
enum {
	DRBD_FAULT_MD_WR = 0,	/* meta data write */
	DRBD_FAULT_MD_RD = 1,	/*           read  */
	DRBD_FAULT_RS_WR = 2,	/* resync          */
	DRBD_FAULT_RS_RD = 3,
	DRBD_FAULT_DT_WR = 4,	/* data            */
	DRBD_FAULT_DT_RD = 5,
	DRBD_FAULT_DT_RA = 6,	/* data read ahead */
	DRBD_FAULT_BM_ALLOC = 7,	/* bitmap allocation */
	DRBD_FAULT_AL_EE = 8,	/* alloc ee */

	DRBD_FAULT_MAX,
};

#ifdef CONFIG_DRBD_FAULT_INJECTION
extern unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type);
static inline int
drbd_insert_fault(struct drbd_conf *mdev, unsigned int type) {
	return fault_rate &&
		(enable_faults & (1<<type)) &&
		_drbd_insert_fault(mdev, type);
}
#define FAULT_ACTIVE(_m, _t) (drbd_insert_fault((_m), (_t)))

#else
#define FAULT_ACTIVE(_m, _t) (0)
#endif

/* integer division, round _UP_ to the next integer */
#define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
/* usual integer division */
#define div_floor(A, B) ((A)/(B))

/* drbd_meta-data.c (still in drbd_main.c) */
/* 4th incarnation of the disk layout. */
#define DRBD_MD_MAGIC (DRBD_MAGIC+4)

extern struct drbd_conf **minor_table;
extern struct ratelimit_state drbd_ratelimit_state;

/* on the wire */
enum drbd_packets {
	/* receiver (data socket) */
	P_DATA		      = 0x00,
	P_DATA_REPLY	      = 0x01, /* Response to P_DATA_REQUEST */
	P_RS_DATA_REPLY	      = 0x02, /* Response to P_RS_DATA_REQUEST */
	P_BARRIER	      = 0x03,
	P_BITMAP	      = 0x04,
	P_BECOME_SYNC_TARGET  = 0x05,
	P_BECOME_SYNC_SOURCE  = 0x06,
	P_UNPLUG_REMOTE	      = 0x07, /* Used at various times to hint the peer */
	P_DATA_REQUEST	      = 0x08, /* Used to ask for a data block */
	P_RS_DATA_REQUEST     = 0x09, /* Used to ask for a data block for resync */
	P_SYNC_PARAM	      = 0x0a,
	P_PROTOCOL	      = 0x0b,
	P_UUIDS		      = 0x0c,
	P_SIZES		      = 0x0d,
	P_STATE		      = 0x0e,
	P_SYNC_UUID	      = 0x0f,
	P_AUTH_CHALLENGE      = 0x10,
	P_AUTH_RESPONSE	      = 0x11,
	P_STATE_CHG_REQ	      = 0x12,

	/* asender (meta socket) */
	P_PING		      = 0x13,
	P_PING_ACK	      = 0x14,
	P_RECV_ACK	      = 0x15, /* Used in protocol B */
	P_WRITE_ACK	      = 0x16, /* Used in protocol C */
	P_RS_WRITE_ACK	      = 0x17, /* Is a P_WRITE_ACK, additionally call set_in_sync(). */
	P_DISCARD_ACK	      = 0x18, /* Used in proto C, two-primaries conflict detection */
	P_NEG_ACK	      = 0x19, /* Sent if local disk is unusable */
	P_NEG_DREPLY	      = 0x1a, /* Local disk is broken... */
	P_NEG_RS_DREPLY	      = 0x1b, /* Local disk is broken... */
	P_BARRIER_ACK	      = 0x1c,
	P_STATE_CHG_REPLY     = 0x1d,

	/* "new" commands, no longer fitting into the ordering scheme above */

	P_OV_REQUEST	      = 0x1e, /* data socket */
	P_OV_REPLY	      = 0x1f,
	P_OV_RESULT	      = 0x20, /* meta socket */
	P_CSUM_RS_REQUEST     = 0x21, /* data socket */
	P_RS_IS_IN_SYNC	      = 0x22, /* meta socket */
	P_SYNC_PARAM89	      = 0x23, /* data socket, protocol version 89 replacement for P_SYNC_PARAM */
	P_COMPRESSED_BITMAP   = 0x24, /* compressed or otherwise encoded bitmap transfer */

	P_MAX_CMD	      = 0x25,
	P_MAY_IGNORE	      = 0x100, /* Flag to test if (cmd > P_MAY_IGNORE) ... */
	P_MAX_OPT_CMD	      = 0x101,

	/* special command ids for handshake */

	P_HAND_SHAKE_M	      = 0xfff1, /* First Packet on the MetaSock */
	P_HAND_SHAKE_S	      = 0xfff2, /* First Packet on the Socket */

	P_HAND_SHAKE	      = 0xfffe	/* FIXED for the next century! */
};

static inline const char *cmdname(enum drbd_packets cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]	       = "Data",
		[P_DATA_REPLY]	       = "DataReply",
		[P_RS_DATA_REPLY]      = "RSDataReply",
		[P_BARRIER]	       = "Barrier",
		[P_BITMAP]	       = "ReportBitMap",
		[P_BECOME_SYNC_TARGET] = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE] = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]      = "UnplugRemote",
		[P_DATA_REQUEST]       = "DataRequest",
		[P_RS_DATA_REQUEST]    = "RSDataRequest",
		[P_SYNC_PARAM]	       = "SyncParam",
		[P_SYNC_PARAM89]       = "SyncParam89",
		[P_PROTOCOL]	       = "ReportProtocol",
		[P_UUIDS]	       = "ReportUUIDs",
		[P_SIZES]	       = "ReportSizes",
		[P_STATE]	       = "ReportState",
		[P_SYNC_UUID]	       = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]     = "AuthChallenge",
		[P_AUTH_RESPONSE]      = "AuthResponse",
		[P_PING]	       = "Ping",
		[P_PING_ACK]	       = "PingAck",
		[P_RECV_ACK]	       = "RecvAck",
		[P_WRITE_ACK]	       = "WriteAck",
		[P_RS_WRITE_ACK]       = "RSWriteAck",
		[P_DISCARD_ACK]	       = "DiscardAck",
		[P_NEG_ACK]	       = "NegAck",
		[P_NEG_DREPLY]	       = "NegDReply",
		[P_NEG_RS_DREPLY]      = "NegRSDReply",
		[P_BARRIER_ACK]	       = "BarrierAck",
		[P_STATE_CHG_REQ]      = "StateChgRequest",
		[P_STATE_CHG_REPLY]    = "StateChgReply",
		[P_OV_REQUEST]	       = "OVRequest",
		[P_OV_REPLY]	       = "OVReply",
		[P_OV_RESULT]	       = "OVResult",
		[P_CSUM_RS_REQUEST]    = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]      = "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]  = "CBitmap",
		[P_MAX_CMD]	       = NULL,
	};

	if (cmd == P_HAND_SHAKE_M)
		return "HandShakeM";
	if (cmd == P_HAND_SHAKE_S)
		return "HandShakeS";
	if (cmd == P_HAND_SHAKE)
		return "HandShake";
	if (cmd >= P_MAX_CMD)
		return "Unknown";
	return cmdnames[cmd];
}

/* for sending/receiving the bitmap,
 * possibly in some encoding scheme */
struct bm_xfer_ctx {
	/* "const"
	 * stores total bits and long words
	 * of the bitmap, so we don't need to
	 * call the accessor functions over and again. */
	unsigned long bm_bits;
	unsigned long bm_words;
	/* during xfer, current position within the bitmap */
	unsigned long bit_offset;
	unsigned long word_offset;

	/* statistics; index: (h->command == P_BITMAP) */
	unsigned packets[2];
	unsigned bytes[2];
};

extern void INFO_bm_xfer_stats(struct drbd_conf *mdev,
		const char *direction, struct bm_xfer_ctx *c);

static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
{
	/* word_offset counts "native long words" (32 or 64 bit),
	 * aligned at 64 bit.
	 * Encoded packet may end at an unaligned bit offset.
	 * In case a fallback clear text packet is transmitted in
	 * between, we adjust this offset back to the last 64bit
	 * aligned "native long word", which makes coding and decoding
	 * the plain text bitmap much more convenient.  */
#if BITS_PER_LONG == 64
	c->word_offset = c->bit_offset >> 6;
#elif BITS_PER_LONG == 32
	c->word_offset = c->bit_offset >> 5;
	c->word_offset &= ~(1UL);
#else
# error "unsupported BITS_PER_LONG"
#endif
}

#ifndef __packed
#define __packed __attribute__((packed))
#endif

/* This is the layout for a packet on the wire.
 * The byteorder is the network byte order.
 *     (except block_id and barrier fields.
 *	these are pointers to local structs
 *	and have no relevance for the partner,
 *	which just echoes them as received.)
 *
 * NOTE that the payload starts at a long aligned offset,
 * regardless of 32 or 64 bit arch!
 */
struct p_header {
	u32	  magic;
	u16	  command;
	u16	  length;	/* bytes of data after this header */
	u8	  payload[0];
} __packed;
/* 8 bytes. packet FIXED for the next century! */

/*
 * short commands, packets without payload, plain p_header:
 *   P_PING
 *   P_PING_ACK
 *   P_BECOME_SYNC_TARGET
 *   P_BECOME_SYNC_SOURCE
 *   P_UNPLUG_REMOTE
 */

/*
 * commands with out-of-struct payload:
 *   P_BITMAP    (no additional fields)
 *   P_DATA, P_DATA_REPLY (see p_data)
 *   P_COMPRESSED_BITMAP (see receive_compressed_bitmap)
 */

/* these defines must not be changed without changing the protocol version */
#define DP_HARDBARRIER	      1
#define DP_RW_SYNC	      2
#define DP_MAY_SET_IN_SYNC    4

struct p_data {
	struct p_header head;
	u64	    sector;    /* 64 bits sector number */
	u64	    block_id;  /* to identify the request in protocol B&C */
	u32	    seq_num;
	u32	    dp_flags;
} __packed;

/*
 * commands which share a struct:
 *  p_block_ack:
 *   P_RECV_ACK (proto B), P_WRITE_ACK (proto C),
 *   P_DISCARD_ACK (proto C, two-primaries conflict detection)
 *  p_block_req:
 *   P_DATA_REQUEST, P_RS_DATA_REQUEST
 */
struct p_block_ack {
	struct p_header head;
	u64	    sector;
	u64	    block_id;
	u32	    blksize;
	u32	    seq_num;
} __packed;


struct p_block_req {
	struct p_header head;
	u64 sector;
	u64 block_id;
	u32 blksize;
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

/*
 * commands with their own struct for additional fields:
 *   P_HAND_SHAKE
 *   P_BARRIER
 *   P_BARRIER_ACK
 *   P_SYNC_PARAM
 *   ReportParams
 */

struct p_handshake {
	struct p_header head;	/* 8 bytes */
	u32 protocol_min;
	u32 feature_flags;
	u32 protocol_max;

	/* should be more than enough for future enhancements;
	 * for now, feature_flags and the reserved array shall be zero.
	 */

	u32 _pad;
	u64 reserverd[7];
} __packed;
/* 80 bytes, FIXED for the next century */

struct p_barrier {
	struct p_header head;
	u32 barrier;	/* barrier number _handle_ only */
	u32 pad;	/* to multiple of 8 Byte */
} __packed;

struct p_barrier_ack {
	struct p_header head;
	u32 barrier;
	u32 set_size;
} __packed;

struct p_rs_param {
	struct p_header head;
	u32 rate;

	/* Since protocol version 88 and higher. */
	char verify_alg[0];
} __packed;

struct p_rs_param_89 {
	struct p_header head;
	u32 rate;
	/* protocol version 89: */
	char verify_alg[SHARED_SECRET_MAX];
	char csums_alg[SHARED_SECRET_MAX];
} __packed;

enum drbd_conn_flags {
	CF_WANT_LOSE = 1,
	CF_DRY_RUN = 2,
};

struct p_protocol {
	struct p_header head;
	u32 protocol;
	u32 after_sb_0p;
	u32 after_sb_1p;
	u32 after_sb_2p;
	u32 conn_flags;
	u32 two_primaries;

	/* Since protocol version 87 and higher. */
	char integrity_alg[0];

} __packed;

struct p_uuids {
	struct p_header head;
	u64 uuid[UI_EXTENDED_SIZE];
} __packed;

struct p_rs_uuid {
	struct p_header head;
	u64	    uuid;
} __packed;

struct p_sizes {
	struct p_header head;
	u64	    d_size;  /* size of disk */
	u64	    u_size;  /* user requested size */
	u64	    c_size;  /* current exported size */
	u32	    max_segment_size;  /* Maximal size of a BIO */
	u16	    queue_order_type;  /* not yet implemented in DRBD */
	u16	    dds_flags; /* use enum dds_flags here. */
} __packed;

struct p_state {
	struct p_header head;
	u32	    state;
} __packed;

struct p_req_state {
	struct p_header head;
	u32	    mask;
	u32	    val;
} __packed;

struct p_req_state_reply {
	struct p_header head;
	u32	    retcode;
} __packed;

struct p_drbd06_param {
	u64	  size;
	u32	  state;
	u32	  blksize;
	u32	  protocol;
	u32	  version;
	u32	  gen_cnt[5];
	u32	  bit_map_gen[5];
} __packed;

struct p_discard {
	struct p_header head;
	u64	    block_id;
	u32	    seq_num;
	u32	    pad;
} __packed;

/* Valid values for the encoding field.
 * Bump proto version when changing this. */
enum drbd_bitmap_code {
	/* RLE_VLI_Bytes = 0,
	 * and other bit variants had been defined during
	 * algorithm evaluation. */
	RLE_VLI_Bits = 2,
};

struct p_compressed_bm {
	struct p_header head;
	/* (encoding & 0x0f): actual encoding, see enum drbd_bitmap_code
	 * (encoding & 0x80): polarity (set/unset) of first runlength
	 * ((encoding >> 4) & 0x07): pad_bits, number of trailing zero bits
	 * used to pad up to head.length bytes
	 */
	u8 encoding;

	u8 code[0];
} __packed;

/* DCBP: Drbd Compressed Bitmap Packet ... */
static inline enum drbd_bitmap_code
DCBP_get_code(struct p_compressed_bm *p)
{
	return (enum drbd_bitmap_code)(p->encoding & 0x0f);
}

static inline void
DCBP_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static inline int
DCBP_get_start(struct p_compressed_bm *p)
{
	return (p->encoding & 0x80) != 0;
}

static inline void
DCBP_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static inline int
DCBP_get_pad_bits(struct p_compressed_bm *p)
{
	return (p->encoding >> 4) & 0x7;
}

static inline void
DCBP_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}

/* one bitmap packet, including the p_header,
 * should fit within one _architecture independent_ page.
 * so we need to use the fixed size 4KiB page size
 * most architectures have used for a long time.
 */
#define BM_PACKET_PAYLOAD_BYTES (4096 - sizeof(struct p_header))
#define BM_PACKET_WORDS (BM_PACKET_PAYLOAD_BYTES/sizeof(long))
#define BM_PACKET_VLI_BYTES_MAX (4096 - sizeof(struct p_compressed_bm))
#if (PAGE_SIZE < 4096)
/* drbd_send_bitmap / receive_bitmap would break horribly */
#error "PAGE_SIZE too small"
#endif

union p_polymorph {
	struct p_header		 header;
	struct p_handshake	 handshake;
	struct p_data		 data;
	struct p_block_ack	 block_ack;
	struct p_barrier	 barrier;
	struct p_barrier_ack	 barrier_ack;
	struct p_rs_param_89	 rs_param_89;
	struct p_protocol	 protocol;
	struct p_sizes		 sizes;
	struct p_uuids		 uuids;
	struct p_state		 state;
	struct p_req_state	 req_state;
	struct p_req_state_reply req_state_reply;
	struct p_block_req	 block_req;
} __packed;

/**********************************************************************/
enum drbd_thread_state {
	None,
	Running,
	Exiting,
	Restarting
};

struct drbd_thread {
	spinlock_t t_lock;
	struct task_struct *task;
	struct completion stop;
	enum drbd_thread_state t_state;
	int (*function) (struct drbd_thread *);
	struct drbd_conf *mdev;
	int reset_cpu_mask;
};

static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
{
	/* THINK testing the t_state seems to be uncritical in all cases
	 * (but thread_{start,stop}), so we can read it *without* the lock.
	 *	--lge */

	smp_rmb();
	return thi->t_state;
}


/*
 * Having this as the first member of a struct provides sort of "inheritance".
 * "derived" structs can be "drbd_queue_work()"ed.
 * The callback should know and cast back to the descendant struct.
 * drbd_request and drbd_epoch_entry are descendants of drbd_work.
 */
struct drbd_work;
typedef int (*drbd_work_cb)(struct drbd_conf *, struct drbd_work *, int cancel);
struct drbd_work {
	struct list_head list;
	drbd_work_cb cb;
};

struct drbd_tl_epoch;
struct drbd_request {
	struct drbd_work w;
	struct drbd_conf *mdev;

	/* if local IO is not allowed, will be NULL.
	 * if local IO _is_ allowed, holds the locally submitted bio clone,
	 * or, after local IO completion, the ERR_PTR(error).
	 * see drbd_endio_pri(). */
	struct bio *private_bio;

	struct hlist_node colision;
	sector_t sector;
	unsigned int size;
	unsigned int epoch; /* barrier_nr */

	/* barrier_nr: used to check on "completion" whether this req was in
	 * the current epoch, and we therefore have to close it,
	 * starting a new epoch...
	 */

	/* up to here, the struct layout is identical to drbd_epoch_entry;
	 * we might be able to use that to our advantage...  */

	struct list_head tl_requests; /* ring list in the transfer log */
	struct bio *master_bio;       /* master bio pointer */
	unsigned long rq_state; /* see comments above _req_mod() */
	int seq_num;
	unsigned long start_time;
};

struct drbd_tl_epoch {
	struct drbd_work w;
	struct list_head requests; /* requests before */
	struct drbd_tl_epoch *next; /* pointer to the next barrier */
	unsigned int br_number;  /* the barrier's identifier. */
	int n_req;	/* number of requests attached before this barrier */
};

struct drbd_request;

/* These Tl_epoch_entries may be in one of 6 lists:
   active_ee .. data packet being written
   sync_ee .. syncer block being written
   done_ee .. block written, need to send P_WRITE_ACK
   read_ee .. [RS]P_DATA_REQUEST being read
*/

struct drbd_epoch {
	struct list_head list;
	unsigned int barrier_nr;
	atomic_t epoch_size; /* increased on every request added. */
	atomic_t active;     /* increased on every req. added, and dec on every finished. */
	unsigned long flags;
};

/* drbd_epoch flag bits */
enum {
	DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
	DE_BARRIER_IN_NEXT_EPOCH_DONE,
	DE_CONTAINS_A_BARRIER,
	DE_HAVE_BARRIER_NUMBER,
	DE_IS_FINISHING,
};

enum epoch_event {
	EV_PUT,
	EV_GOT_BARRIER_NR,
	EV_BARRIER_DONE,
	EV_BECAME_LAST,
	EV_CLEANUP = 32, /* used as flag */
};

struct drbd_epoch_entry {
	struct drbd_work w;
	struct drbd_conf *mdev;
	struct bio *private_bio;
	struct hlist_node colision;
	sector_t sector;
	unsigned int size;
	struct drbd_epoch *epoch;

	/* up to here, the struct layout is identical to drbd_request;
	 * we might be able to use that to our advantage...  */

	unsigned int flags;
	u64    block_id;
};

struct drbd_wq_barrier {
	struct drbd_work w;
	struct completion done;
};

struct digest_info {
	int digest_size;
	void *digest;
};

/* ee flag bits */
enum {
	__EE_CALL_AL_COMPLETE_IO,
	__EE_CONFLICT_PENDING,
	__EE_MAY_SET_IN_SYNC,
	__EE_IS_BARRIER,
};
#define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
#define EE_CONFLICT_PENDING    (1<<__EE_CONFLICT_PENDING)
#define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)

/* global flag bits */
enum {
	CREATE_BARRIER,		/* next P_DATA is preceded by a P_BARRIER */
	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
	SEND_PING,		/* whether asender should send a ping asap */

	STOP_SYNC_TIMER,	/* tell timer to cancel itself */
	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
	MD_DIRTY,		/* current uuids and flags not yet on disk */
	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
	USE_DEGR_WFC_T,		/* degr-wfc-timeout instead of wfc-timeout. */
	CLUSTER_ST_CHANGE,	/* Cluster wide state change going on... */
	CL_ST_CHG_SUCCESS,
	CL_ST_CHG_FAIL,
	CRASHED_PRIMARY,	/* This node was a crashed primary.
				 * Gets cleared when the state.conn
				 * goes into C_CONNECTED state. */
	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
	CONSIDER_RESYNC,

	MD_NO_BARRIER,		/* meta data device does not support barriers,
				   so don't even try */
	SUSPEND_IO,		/* suspend application io */
	BITMAP_IO,		/* suspend application io;
				   once no more io in flight, start bitmap io */
	BITMAP_IO_QUEUED,	/* Started bitmap IO */
	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
	NET_CONGESTED,		/* The data socket is congested */

	CONFIG_PENDING,		/* serialization of (re)configuration requests.
				 * if set, also prevents the device from dying */
	DEVICE_DYING,		/* device became unconfigured,
				 * but worker thread is still handling the cleanup.
				 * reconfiguring (nl_disk_conf, nl_net_conf) is disallowed,
				 * while this is set. */
	RESIZE_PENDING,		/* Size change detected locally, waiting for the response from
				 * the peer, if it changed there as well. */
	CONN_DRY_RUN,		/* Expect disconnect after resync handshake. */
	GOT_PING_ACK,		/* set when we receive a ping_ack packet, misc wait gets woken */
};

struct drbd_bitmap; /* opaque for drbd_conf */

/* TODO sort members for performance
 * MAYBE group them further */

/* THINK maybe we actually want to use the default "event/%s" worker threads
 * or similar in linux 2.6, which uses per cpu data and threads.
 *
 * To be general, this might need a spin_lock member.
 * For now, please use the mdev->req_lock to protect list_head,
 * see drbd_queue_work below.
 */
struct drbd_work_queue {
	struct list_head q;
	struct semaphore s; /* producers up it, worker down()s it */
	spinlock_t q_lock;  /* to protect the list. */
};

struct drbd_socket {
	struct drbd_work_queue work;
	struct mutex mutex;
	struct socket    *socket;
	/* this way we get our
	 * send/receive buffers off the stack */
	union p_polymorph sbuf;
	union p_polymorph rbuf;
};

struct drbd_md {
	u64 md_offset;		/* sector offset to 'super' block */

	u64 la_size_sect;	/* last agreed size, unit sectors */
	u64 uuid[UI_SIZE];
	u64 device_uuid;
	u32 flags;
	u32 md_size_sect;

	s32 al_offset;	/* signed relative sector offset to al area */
	s32 bm_offset;	/* signed relative sector offset to bitmap */

	/* u32 al_nr_extents;	   important for restoring the AL
	 * is stored into  sync_conf.al_extents, which in turn
	 * gets applied to act_log->nr_elements
	 */
};

/* for sync_conf and other types... */
#define NL_PACKET(name, number, fields) struct name { fields };
#define NL_INTEGER(pn,pr,member) int member;
#define NL_INT64(pn,pr,member) __u64 member;
#define NL_BIT(pn,pr,member) unsigned member:1;
#define NL_STRING(pn,pr,member,len) unsigned char member[len]; int member ## _len;
#include "linux/drbd_nl.h"

struct drbd_backing_dev {
	struct block_device *backing_bdev;
	struct block_device *md_bdev;
	struct file *lo_file;
	struct file *md_file;
	struct drbd_md md;
	struct disk_conf dc; /* The user provided config... */
	sector_t known_size; /* last known size of that backing device */
};

struct drbd_md_io {
	struct drbd_conf *mdev;
	struct completion event;
	int error;
};

struct bm_io_work {
	struct drbd_work w;
	char *why;
	int (*io_fn)(struct drbd_conf *mdev);
	void (*done)(struct drbd_conf *mdev, int rv);
};

enum write_ordering_e {
	WO_none,
	WO_drain_io,
	WO_bdev_flush,
	WO_bio_barrier
};

struct drbd_conf {
	/* things that are stored as / read from meta data on disk */
	unsigned long flags;

	/* configured by drbdsetup */
	struct net_conf *net_conf; /* protected by get_net_conf() and put_net_conf() */
	struct syncer_conf sync_conf;
	struct drbd_backing_dev *ldev __protected_by(local);

	sector_t p_size;     /* partner's disk size */
	struct request_queue *rq_queue;
	struct block_device *this_bdev;
	struct gendisk	    *vdisk;

	struct drbd_socket data; /* data/barrier/cstate/parameter packets */
	struct drbd_socket meta; /* ping/ack (metadata) packets */
	int agreed_pro_version;  /* actually used protocol version */
	unsigned long last_received; /* in jiffies, either socket */
	unsigned int ko_count;
	struct drbd_work  resync_work,
			  unplug_work,
			  md_sync_work;
	struct timer_list resync_timer;
	struct timer_list md_sync_timer;

	/* Used after attach while negotiating new disk state. */
	union drbd_state new_state_tmp;

	union drbd_state state;
	wait_queue_head_t misc_wait;
	wait_queue_head_t state_wait;  /* upon each state change. */
	unsigned int send_cnt;
	unsigned int recv_cnt;
	unsigned int read_cnt;
	unsigned int writ_cnt;
	unsigned int al_writ_cnt;
	unsigned int bm_writ_cnt;
	atomic_t ap_bio_cnt;	 /* Requests we need to complete */
	atomic_t ap_pending_cnt; /* AP data packets on the wire, ack expected */
	atomic_t rs_pending_cnt; /* RS request/data packets on the wire */
	atomic_t unacked_cnt;	 /* Need to send replies for */
	atomic_t local_cnt;	 /* Waiting for local completion */
	atomic_t net_cnt;	 /* Users of net_conf */
	spinlock_t req_lock;
	struct drbd_tl_epoch *unused_spare_tle; /* for pre-allocation */
	struct drbd_tl_epoch *newest_tle;
	struct drbd_tl_epoch *oldest_tle;
	struct list_head out_of_sequence_requests;
	struct hlist_head *tl_hash;
	unsigned int tl_hash_s;

	/* blocks to sync in this run [unit BM_BLOCK_SIZE] */
	unsigned long rs_total;
	/* number of sync IOs that failed in this run */
	unsigned long rs_failed;
	/* Syncer's start time [unit jiffies] */
	unsigned long rs_start;
	/* cumulated time in PausedSyncX state [unit jiffies] */
	unsigned long rs_paused;
	/* blocks not up-to-date at mark [unit BM_BLOCK_SIZE] */
	unsigned long rs_mark_left;
	/* mark's time [unit jiffies] */
	unsigned long rs_mark_time;
	/* skipped because csum was equal [unit BM_BLOCK_SIZE] */
	unsigned long rs_same_csum;

	/* where does the admin want us to start? (sector) */
	sector_t ov_start_sector;
	/* where are we now? (sector) */
	sector_t ov_position;
	/* Start sector of out of sync range (to merge printk reporting). */
	sector_t ov_last_oos_start;
	/* size of out-of-sync range in sectors. */
	sector_t ov_last_oos_size;
	unsigned long ov_left; /* in bits */
	struct crypto_hash *csums_tfm;
	struct crypto_hash *verify_tfm;

	struct drbd_thread receiver;
	struct drbd_thread worker;
	struct drbd_thread asender;
	struct drbd_bitmap *bitmap;
	unsigned long bm_resync_fo; /* bit offset for drbd_bm_find_next */

	/* Used to track operations of resync... */
	struct lru_cache *resync;
	/* Number of locked elements in resync LRU */
	unsigned int resync_locked;
	/* resync extent number waiting for application requests */
	unsigned int resync_wenr;

	int open_cnt;
	u64 *p_uuid;
	struct drbd_epoch *current_epoch;
	spinlock_t epoch_lock;
	unsigned int epochs;
	enum write_ordering_e write_ordering;
	struct list_head active_ee; /* IO in progress */
	struct list_head sync_ee;   /* IO in progress */
	struct list_head done_ee;   /* send ack */
	struct list_head read_ee;   /* IO in progress */
	struct list_head net_ee;    /* zero-copy network send in progress */
	struct hlist_head *ee_hash; /* is protected by req_lock! */
	unsigned int ee_hash_s;

	/* this one is protected by ee_lock, single thread */
	struct drbd_epoch_entry *last_write_w_barrier;

	int next_barrier_nr;
	struct hlist_head *app_reads_hash; /* is protected by req_lock */
	struct list_head resync_reads;
	atomic_t pp_in_use;
	wait_queue_head_t ee_wait;
	struct page *md_io_page;	/* one page buffer for md_io */
	struct page *md_io_tmpp;	/* for logical_block_size != 512 */
	struct mutex md_io_mutex;	/* protects the md_io_buffer */
	spinlock_t al_lock;
	wait_queue_head_t al_wait;
	struct lru_cache *act_log;	/* activity log */
	unsigned int al_tr_number;
	int al_tr_cycle;
	int al_tr_pos;   /* position of the next transaction in the journal */
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_w_tfm; /* to be used by the worker thread */
	struct crypto_hash *integrity_r_tfm; /* to be used by the receiver thread */
	void *int_dig_out;
	void *int_dig_in;
	void *int_dig_vv;
	wait_queue_head_t seq_wait;
	atomic_t packet_seq;
	unsigned int peer_seq;
	spinlock_t peer_seq_lock;
	unsigned int minor;
	unsigned long comm_bm_set; /* communicated number of set bits. */
	cpumask_var_t cpu_mask;
	struct bm_io_work bm_io_work;
	u64 ed_uuid; /* UUID of the exposed data */
	struct mutex state_mutex;
	char congestion_reason;  /* Why we were congested... */
};

static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
{
	struct drbd_conf *mdev;

	mdev = minor < minor_count ? minor_table[minor] : NULL;

	return mdev;
}

static inline unsigned int mdev_to_minor(struct drbd_conf *mdev)
{
	return mdev->minor;
}

/* returns 1 if it was successful,
 * returns 0 if there was no data socket.
 * so wherever you are going to use the data.socket, e.g. do
 * if (!drbd_get_data_sock(mdev))
 *	return 0;
 *	CODE();
 * drbd_put_data_sock(mdev);
 */
static inline int drbd_get_data_sock(struct drbd_conf *mdev)
{
	mutex_lock(&mdev->data.mutex);
	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (unlikely(mdev->data.socket == NULL)) {
		mutex_unlock(&mdev->data.mutex);
		return 0;
	}
	return 1;
}

static inline void drbd_put_data_sock(struct drbd_conf *mdev)
{
	mutex_unlock(&mdev->data.mutex);
}

1070 | ||
1071 | /* | |
1072 | * function declarations | |
1073 | *************************/ | |
1074 | ||
1075 | /* drbd_main.c */ | |
1076 | ||
1077 | enum chg_state_flags { | |
1078 | CS_HARD = 1, | |
1079 | CS_VERBOSE = 2, | |
1080 | CS_WAIT_COMPLETE = 4, | |
1081 | CS_SERIALIZE = 8, | |
1082 | CS_ORDERED = CS_WAIT_COMPLETE + CS_SERIALIZE, | |
1083 | }; | |
1084 | ||
e89b591c PR |
1085 | enum dds_flags { |
1086 | DDSF_FORCED = 1, | |
1087 | DDSF_NO_RESYNC = 2, /* Do not run a resync for the new space */ | |
1088 | }; | |
1089 | ||
b411b363 PR |
extern void drbd_init_set_defaults(struct drbd_conf *mdev);
extern int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
			union drbd_state mask, union drbd_state val);
extern void drbd_force_state(struct drbd_conf *, union drbd_state,
			union drbd_state);
extern int _drbd_request_state(struct drbd_conf *, union drbd_state,
			union drbd_state, enum chg_state_flags);
extern int __drbd_set_state(struct drbd_conf *, union drbd_state,
			enum chg_state_flags, struct completion *done);
extern void print_st_err(struct drbd_conf *, union drbd_state,
			union drbd_state, int);
extern int  drbd_thread_start(struct drbd_thread *thi);
extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
#ifdef CONFIG_SMP
extern void drbd_thread_current_set_cpu(struct drbd_conf *mdev);
extern void drbd_calc_cpu_mask(struct drbd_conf *mdev);
#else
#define drbd_thread_current_set_cpu(A) ({})
#define drbd_calc_cpu_mask(A) ({})
#endif
extern void drbd_free_resources(struct drbd_conf *mdev);
extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		       unsigned int set_size);
extern void tl_clear(struct drbd_conf *mdev);
extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
extern void drbd_free_sock(struct drbd_conf *mdev);
extern int drbd_send(struct drbd_conf *mdev, struct socket *sock,
			void *buf, size_t size, unsigned msg_flags);
extern int drbd_send_protocol(struct drbd_conf *mdev);
extern int drbd_send_uuids(struct drbd_conf *mdev);
extern int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev);
extern int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val);
extern int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags);
extern int _drbd_send_state(struct drbd_conf *mdev);
extern int drbd_send_state(struct drbd_conf *mdev);
extern int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
			enum drbd_packets cmd, struct p_header *h,
			size_t size, unsigned msg_flags);
#define USE_DATA_SOCKET 1
#define USE_META_SOCKET 0
extern int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
			enum drbd_packets cmd, struct p_header *h,
			size_t size);
extern int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd,
			char *data, size_t size);
extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
			u32 set_size);
extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct drbd_epoch_entry *e);
extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_block_req *rp);
extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
			struct p_data *dp);
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
			    sector_t sector, int blksize, u64 block_id);
extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
			   struct drbd_epoch_entry *e);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int _drbd_send_barrier(struct drbd_conf *mdev,
			struct drbd_tl_epoch *barrier);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
			      sector_t sector, int size, u64 block_id);
extern int drbd_send_drequest_csum(struct drbd_conf *mdev,
				   sector_t sector, int size,
				   void *digest, int digest_size,
				   enum drbd_packets cmd);
extern int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size);

extern int drbd_send_bitmap(struct drbd_conf *mdev);
extern int _drbd_send_bitmap(struct drbd_conf *mdev);
extern int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode);
extern void drbd_free_bc(struct drbd_backing_dev *ldev);
extern void drbd_mdev_cleanup(struct drbd_conf *mdev);

/* drbd_meta-data.c (still in drbd_main.c) */
extern void drbd_md_sync(struct drbd_conf *mdev);
extern int  drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev);
/* maybe define them below as inline? */
extern void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local);
extern void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void _drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local);
extern void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local);
extern void drbd_md_set_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern void drbd_md_clear_flag(struct drbd_conf *mdev, int flags) __must_hold(local);
extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
extern void drbd_md_mark_dirty(struct drbd_conf *mdev);
extern void drbd_queue_bitmap_io(struct drbd_conf *mdev,
				 int (*io_fn)(struct drbd_conf *),
				 void (*done)(struct drbd_conf *, int),
				 char *why);
extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);


/* Meta data layout
   We reserve a 128MB Block (4k aligned)
   * either at the end of the backing device
   * or on a separate meta data device. */

#define MD_RESERVED_SECT (128LU << 11)  /* 128 MB, unit sectors */
/* The following numbers are sectors */
#define MD_AL_OFFSET 8	    /* 8 Sectors after start of meta area */
#define MD_AL_MAX_SIZE 64   /* = 32 kb LOG  ~ 3776 extents ~ 14 GB Storage */
/* Allows up to about 3.8TB */
#define MD_BM_OFFSET (MD_AL_OFFSET + MD_AL_MAX_SIZE)

/* Since the smallest IO unit is usually 512 bytes */
#define MD_SECTOR_SHIFT	 9
#define MD_SECTOR_SIZE	 (1<<MD_SECTOR_SHIFT)

/* activity log */
#define AL_EXTENTS_PT ((MD_SECTOR_SIZE-12)/8-1) /* 61 ; Extents per 512B sector */
#define AL_EXTENT_SHIFT 22		 /* One extent represents 4M Storage */
#define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)

#if BITS_PER_LONG == 32
#define LN2_BPL 5
#define cpu_to_lel(A) cpu_to_le32(A)
#define lel_to_cpu(A) le32_to_cpu(A)
#elif BITS_PER_LONG == 64
#define LN2_BPL 6
#define cpu_to_lel(A) cpu_to_le64(A)
#define lel_to_cpu(A) le64_to_cpu(A)
#else
#error "LN2 of BITS_PER_LONG unknown!"
#endif

/* resync bitmap */
/* 16MB sized 'bitmap extent' to track syncer usage */
struct bm_extent {
	int rs_left; /* number of bits set (out of sync) in this extent. */
	int rs_failed; /* number of failed resync requests in this extent. */
	unsigned long flags;
	struct lc_element lce;
};

#define BME_NO_WRITES  0  /* bm_extent.flags: no more requests on this one! */
#define BME_LOCKED     1  /* bm_extent.flags: syncer active on this one. */

/* drbd_bitmap.c */
/*
 * We need to store one bit for a block.
 * Example: 1GB disk @ 4096 byte blocks ==> we need 32 KB bitmap.
 * Bit 0 ==> local node thinks this block is binary identical on both nodes
 * Bit 1 ==> local node thinks this block needs to be synced.
 */

#define BM_BLOCK_SHIFT	12			 /* 4k per bit */
#define BM_BLOCK_SIZE	 (1<<BM_BLOCK_SHIFT)
/* (9+3) : 512 bytes @ 8 bits; representing 16M storage
 * per sector of on disk bitmap */
#define BM_EXT_SHIFT	 (BM_BLOCK_SHIFT + MD_SECTOR_SHIFT + 3)  /* = 24 */
#define BM_EXT_SIZE	 (1<<BM_EXT_SHIFT)

#if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
#error "HAVE YOU FIXED drbdmeta AS WELL??"
#endif

/* thus many _storage_ sectors are described by one bit */
#define BM_SECT_TO_BIT(x)   ((x)>>(BM_BLOCK_SHIFT-9))
#define BM_BIT_TO_SECT(x)   ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
#define BM_SECT_PER_BIT     BM_BIT_TO_SECT(1)

/* convert a bit count to the kilobytes of storage it represents */
#define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))

/* in which _bitmap_ extent (resp. sector) the bit for a certain
 * _storage_ sector is located in */
#define BM_SECT_TO_EXT(x)   ((x)>>(BM_EXT_SHIFT-9))

/* how many _storage_ sectors we have per bitmap sector */
#define BM_EXT_TO_SECT(x)   ((sector_t)(x) << (BM_EXT_SHIFT-9))
#define BM_SECT_PER_EXT     BM_EXT_TO_SECT(1)

/* in one sector of the bitmap, we have this many activity_log extents. */
#define AL_EXT_PER_BM_SECT  (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
#define BM_WORDS_PER_AL_EXT (1 << (AL_EXTENT_SHIFT-BM_BLOCK_SHIFT-LN2_BPL))

#define BM_BLOCKS_PER_BM_EXT_B (BM_EXT_SHIFT - BM_BLOCK_SHIFT)
#define BM_BLOCKS_PER_BM_EXT_MASK  ((1<<BM_BLOCKS_PER_BM_EXT_B) - 1)

/* the extent in "PER_EXTENT" below is an activity log extent
 * we need that many (long words/bytes) to store the bitmap
 * of one AL_EXTENT_SIZE chunk of storage.
 * we can store the bitmap for that many AL_EXTENTS within
 * one sector of the _on_disk_ bitmap:
 * bit	 0	  bit 37   bit 38	     bit (512*8)-1
 *	     ...|........|........|.. // ..|........|
 * sect. 0	 `296	  `304			   ^(512*8*8)-1
 *
#define BM_WORDS_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / BITS_PER_LONG )
#define BM_BYTES_PER_EXT    ( (AL_EXT_SIZE/BM_BLOCK_SIZE) / 8 )  // 128
#define BM_EXT_PER_SECT	    ( 512 / BM_BYTES_PER_EXTENT )        //   4
 */

#define DRBD_MAX_SECTORS_32 (0xffffffffLU)
#define DRBD_MAX_SECTORS_BM \
	  ((MD_RESERVED_SECT - MD_BM_OFFSET) * (1LL<<(BM_EXT_SHIFT-9)))
#if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
#define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
#else
#define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
/* 16 TB in units of sectors */
#if BITS_PER_LONG == 32
/* adjust by one page worth of bitmap,
 * so we won't wrap around in drbd_bm_find_next_bit.
 * you should use 64bit OS for that much storage, anyways. */
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
#else
#define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0x1LU << 32)
#endif
#endif

/* Sector shift value for the "hash" functions of tl_hash and ee_hash tables.
 * With a value of 6 all IO in one 32K block make it to the same slot of the
 * hash table. */
#define HT_SHIFT 6
#define DRBD_MAX_SEGMENT_SIZE (1U<<(9+HT_SHIFT))

/* Number of elements in the app_reads_hash */
#define APP_R_HSIZE 15

extern int  drbd_bm_init(struct drbd_conf *mdev);
extern int  drbd_bm_resize(struct drbd_conf *mdev, sector_t sectors, int set_new_bits);
extern void drbd_bm_cleanup(struct drbd_conf *mdev);
extern void drbd_bm_set_all(struct drbd_conf *mdev);
extern void drbd_bm_clear_all(struct drbd_conf *mdev);
extern int  drbd_bm_set_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
extern int  drbd_bm_clear_bits(
		struct drbd_conf *mdev, unsigned long s, unsigned long e);
/* bm_set_bits variant for use while holding drbd_bm_lock */
extern void _drbd_bm_set_bits(struct drbd_conf *mdev,
		const unsigned long s, const unsigned long e);
extern int  drbd_bm_test_bit(struct drbd_conf *mdev, unsigned long bitnr);
extern int  drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr);
extern int  drbd_bm_write_sect(struct drbd_conf *mdev, unsigned long enr) __must_hold(local);
extern int  drbd_bm_read(struct drbd_conf *mdev) __must_hold(local);
extern int  drbd_bm_write(struct drbd_conf *mdev) __must_hold(local);
extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev,
		unsigned long al_enr);
extern size_t	     drbd_bm_words(struct drbd_conf *mdev);
extern unsigned long drbd_bm_bits(struct drbd_conf *mdev);
extern sector_t      drbd_bm_capacity(struct drbd_conf *mdev);
extern unsigned long drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
/* bm_find_next variants for use while you hold drbd_bm_lock() */
extern unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo);
extern unsigned long drbd_bm_total_weight(struct drbd_conf *mdev);
extern int drbd_bm_rs_done(struct drbd_conf *mdev);
/* for receive_bitmap */
extern void drbd_bm_merge_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);
/* for _drbd_send_bitmap and drbd_bm_write_sect */
extern void drbd_bm_get_lel(struct drbd_conf *mdev, size_t offset,
		size_t number, unsigned long *buffer);

extern void drbd_bm_lock(struct drbd_conf *mdev, char *why);
extern void drbd_bm_unlock(struct drbd_conf *mdev);

extern int drbd_bm_count_bits(struct drbd_conf *mdev, const unsigned long s, const unsigned long e);
/* drbd_main.c */

extern struct kmem_cache *drbd_request_cache;
extern struct kmem_cache *drbd_ee_cache;	/* epoch entries */
extern struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
extern struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
extern mempool_t *drbd_request_mempool;
extern mempool_t *drbd_ee_mempool;

extern struct page *drbd_pp_pool; /* drbd's page pool */
extern spinlock_t   drbd_pp_lock;
extern int	    drbd_pp_vacant;
extern wait_queue_head_t drbd_pp_wait;

extern rwlock_t global_state_lock;

extern struct drbd_conf *drbd_new_device(unsigned int minor);
extern void drbd_free_mdev(struct drbd_conf *mdev);

extern int proc_details;

/* drbd_req */
extern int drbd_make_request_26(struct request_queue *q, struct bio *bio);
extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);


/* drbd_nl.c */
extern void drbd_suspend_io(struct drbd_conf *mdev);
extern void drbd_resume_io(struct drbd_conf *mdev);
extern char *ppsize(char *buf, unsigned long long size);
extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, enum dds_flags) __must_hold(local);
extern void resync_after_online_grow(struct drbd_conf *);
extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
		int force);
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev);
extern int drbd_khelper(struct drbd_conf *mdev, char *cmd);

1400 | /* drbd_worker.c */ | |
1401 | extern int drbd_worker(struct drbd_thread *thi); | |
1402 | extern int drbd_alter_sa(struct drbd_conf *mdev, int na); | |
1403 | extern void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side); | |
1404 | extern void resume_next_sg(struct drbd_conf *mdev); | |
1405 | extern void suspend_other_sg(struct drbd_conf *mdev); | |
1406 | extern int drbd_resync_finished(struct drbd_conf *mdev); | |
1407 | /* maybe rather drbd_main.c ? */ | |
1408 | extern int drbd_md_sync_page_io(struct drbd_conf *mdev, | |
1409 | struct drbd_backing_dev *bdev, sector_t sector, int rw); | |
1410 | extern void drbd_ov_oos_found(struct drbd_conf*, sector_t, int); | |
1411 | ||
1412 | static inline void ov_oos_print(struct drbd_conf *mdev) | |
1413 | { | |
1414 | if (mdev->ov_last_oos_size) { | |
1415 | dev_err(DEV, "Out of sync: start=%llu, size=%lu (sectors)\n", | |
1416 | (unsigned long long)mdev->ov_last_oos_start, | |
1417 | (unsigned long)mdev->ov_last_oos_size); | |
1418 | } | |
1419 | mdev->ov_last_oos_size=0; | |
1420 | } | |


extern void drbd_csum(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
/* worker callbacks */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_data_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_rsdata_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_csum_rs_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_reply(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_end_ov_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_ov_finished(struct drbd_conf *, struct drbd_work *, int);
extern int w_resync_inactive(struct drbd_conf *, struct drbd_work *, int);
extern int w_resume_next_sg(struct drbd_conf *, struct drbd_work *, int);
extern int w_io_error(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_write_hint(struct drbd_conf *, struct drbd_work *, int);
extern int w_make_resync_request(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_dblock(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_barrier(struct drbd_conf *, struct drbd_work *, int);
extern int w_send_read_req(struct drbd_conf *, struct drbd_work *, int);
extern int w_prev_work_done(struct drbd_conf *, struct drbd_work *, int);
extern int w_e_reissue(struct drbd_conf *, struct drbd_work *, int);

extern void resync_timer_fn(unsigned long data);

/* drbd_receiver.c */
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
					      u64 id,
					      sector_t sector,
					      unsigned int data_size,
					      gfp_t gfp_mask) __must_hold(local);
extern void drbd_free_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e);
extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
		struct list_head *head);
extern void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
		struct list_head *head);
extern void drbd_set_recv_tcq(struct drbd_conf *mdev, int tcq_enabled);
extern void _drbd_clear_done_ee(struct drbd_conf *mdev, struct list_head *to_be_freed);
extern void drbd_flush_workqueue(struct drbd_conf *mdev);

/* yes, there is kernel_setsockopt, but only since 2.6.18,
 * so we provide our own helper here. no need to mess with
 * get_fs()/set_fs() either: this is only called from drbd's kernel
 * threads, which run with KERNEL_DS anyway. */
static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int optlen)
{
	int err;
	if (level == SOL_SOCKET)
		err = sock_setsockopt(sock, level, optname, optval, optlen);
	else
		err = sock->ops->setsockopt(sock, level, optname, optval,
					    optlen);
	return err;
}

static inline void drbd_tcp_cork(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_uncork(struct socket *sock)
{
	int __user val = 0;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_CORK,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_nodelay(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_NODELAY,
			(char __user *)&val, sizeof(val));
}

static inline void drbd_tcp_quickack(struct socket *sock)
{
	int __user val = 1;
	(void) drbd_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
			(char __user *)&val, sizeof(val));
}

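/* Typical corking pattern (illustrative sketch only): batch several
 * packets into as few TCP segments as possible:
 *
 *	drbd_tcp_cork(mdev->data.socket);
 *	... send header and payload ...
 *	drbd_tcp_uncork(mdev->data.socket);
 */
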
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo);

/* drbd_proc.c */
extern struct proc_dir_entry *drbd_proc;
extern const struct file_operations drbd_proc_fops;
extern const char *drbd_conn_str(enum drbd_conns s);
extern const char *drbd_role_str(enum drbd_role s);

/* drbd_actlog.c */
extern void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_al_complete_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_complete_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector);
extern void drbd_rs_cancel_all(struct drbd_conf *mdev);
extern int drbd_rs_del_all(struct drbd_conf *mdev);
extern void drbd_rs_failed_io(struct drbd_conf *mdev,
		sector_t sector, int size);
extern int drbd_al_read_log(struct drbd_conf *mdev, struct drbd_backing_dev *);
extern void __drbd_set_in_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_in_sync(mdev, sector, size) \
	__drbd_set_in_sync(mdev, sector, size, __FILE__, __LINE__)
extern void __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector,
		int size, const char *file, const unsigned int line);
#define drbd_set_out_of_sync(mdev, sector, size) \
	__drbd_set_out_of_sync(mdev, sector, size, __FILE__, __LINE__)
extern void drbd_al_apply_to_bm(struct drbd_conf *mdev);
extern void drbd_al_to_on_disk_bm(struct drbd_conf *mdev);
extern void drbd_al_shrink(struct drbd_conf *mdev);


/* drbd_nl.c */

void drbd_nl_cleanup(void);
int __init drbd_nl_init(void);
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
void drbd_bcast_sync_progress(struct drbd_conf *mdev);
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e);


/**
 * DOC: DRBD State macros
 *
 * These macros are used to express state changes in easily readable form.
 *
 * The NS macros expand to a mask and a value, which can be bit-ORed onto
 * the current state as soon as the spinlock (req_lock) has been taken.
 *
 * The _NS macros are used for state functions that get called with the
 * spinlock held. These macros expand directly to the new state value.
 *
 * Besides the basic forms NS() and _NS(), additional _?NS[23] variants are
 * defined to express state changes that affect more than one aspect of the
 * state.
 *
 * E.g. NS2(conn, C_CONNECTED, peer, R_SECONDARY)
 * means that the network connection was established and that the peer
 * is in secondary role.
 */
#define role_MASK R_MASK
#define peer_MASK R_MASK
#define disk_MASK D_MASK
#define pdsk_MASK D_MASK
#define conn_MASK C_MASK
#define susp_MASK 1
#define user_isp_MASK 1
#define aftr_isp_MASK 1

#define NS(T, S) \
	({ union drbd_state mask; mask.i = 0; mask.T = T##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T = (S); val; })
#define NS2(T1, S1, T2, S2) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val; })
#define NS3(T1, S1, T2, S2, T3, S3) \
	({ union drbd_state mask; mask.i = 0; mask.T1 = T1##_MASK; \
	  mask.T2 = T2##_MASK; mask.T3 = T3##_MASK; mask; }), \
	({ union drbd_state val; val.i = 0; val.T1 = (S1); \
	  val.T2 = (S2); val.T3 = (S3); val; })

#define _NS(D, T, S) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T = (S); __ns; })
#define _NS2(D, T1, S1, T2, S2) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	  __ns.T2 = (S2); __ns; })
#define _NS3(D, T1, S1, T2, S2, T3, S3) \
	D, ({ union drbd_state __ns; __ns.i = D->state.i; __ns.T1 = (S1); \
	  __ns.T2 = (S2); __ns.T3 = (S3); __ns; })
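
/* Usage sketch (illustrative only): NS() expands to a (mask, value)
 * argument pair, so a call like
 *
 *	drbd_request_state(mdev, NS(disk, D_OUTDATED));
 *
 * really passes three arguments. With req_lock already held, the _NS()
 * form yields the device plus the complete new state directly:
 *
 *	_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
 *
 * (the latter pattern appears in __drbd_chk_io_error_() below.)
 */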

/*
 * inline helper functions
 *************************/

static inline void drbd_state_lock(struct drbd_conf *mdev)
{
	wait_event(mdev->misc_wait,
		   !test_and_set_bit(CLUSTER_ST_CHANGE, &mdev->flags));
}

static inline void drbd_state_unlock(struct drbd_conf *mdev)
{
	clear_bit(CLUSTER_ST_CHANGE, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
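
/* Illustrative pairing (sketch only): CLUSTER_ST_CHANGE serializes
 * cluster-wide state changes, so callers bracket the change:
 *
 *	drbd_state_lock(mdev);
 *	... negotiate and commit the state change ...
 *	drbd_state_unlock(mdev);
 */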

static inline int _drbd_set_state(struct drbd_conf *mdev,
				  union drbd_state ns, enum chg_state_flags flags,
				  struct completion *done)
{
	int rv;

	read_lock(&global_state_lock);
	rv = __drbd_set_state(mdev, ns, flags, done);
	read_unlock(&global_state_lock);

	return rv;
}

/**
 * drbd_request_state() - Request a state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 *
 * This is the most graceful way of requesting a state change. It is quite
 * verbose in case the state change is not possible, and all those state
 * changes are globally serialized.
 */
static inline int drbd_request_state(struct drbd_conf *mdev,
				     union drbd_state mask,
				     union drbd_state val)
{
	return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
}

#define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
{
	switch (mdev->ldev->dc.on_io_error) {
	case EP_PASS_ON:
		if (!forcedetach) {
			if (printk_ratelimit())
				dev_err(DEV, "Local IO failed in %s. "
					"Passing error on...\n", where);
			break;
		}
		/* NOTE fall through to detach case if forcedetach set */
	case EP_DETACH:
	case EP_CALL_HELPER:
		if (mdev->state.disk > D_FAILED) {
			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
			dev_err(DEV, "Local IO failed in %s. "
				"Detaching...\n", where);
		}
		break;
	}
}

/**
 * drbd_chk_io_error() - Handle the on_io_error setting; called from all IO completion handlers
 * @mdev:	DRBD device.
 * @error:	Error code passed to the IO completion callback.
 * @forcedetach:	Force detach, i.e. the error happened while accessing the meta data.
 *
 * See also drbd_main.c:after_state_ch() if (os.disk > D_FAILED && ns.disk == D_FAILED)
 */
#define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
	int error, int forcedetach, const char *where)
{
	if (error) {
		unsigned long flags;
		spin_lock_irqsave(&mdev->req_lock, flags);
		__drbd_chk_io_error_(mdev, forcedetach, where);
		spin_unlock_irqrestore(&mdev->req_lock, flags);
	}
}


/**
 * drbd_md_first_sector() - Returns the first sector number of the meta data area
 * @bdev:	Meta data block device.
 *
 * BTW, for internal meta data, this happens to be the maximum capacity
 * we could agree upon with our peer node.
 */
static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + bdev->md.bm_offset;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		return bdev->md.md_offset;
	}
}

/**
 * drbd_md_last_sector() - Return the last sector number of the meta data area
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		return bdev->md.md_offset + MD_AL_OFFSET - 1;
	case DRBD_MD_INDEX_FLEX_EXT:
	default:
		/* last sector is inclusive, hence the -1 */
		return bdev->md.md_offset + bdev->md.md_size_sect - 1;
	}
}

/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
	return bdev ? bdev->bd_inode->i_size >> 9 : 0;
}

/**
 * drbd_get_max_capacity() - Returns the capacity we announce to our peer
 * @bdev:	Meta data block device.
 *
 * Returns the capacity we announce to our peer. We clip ourselves at the
 * various MAX_SECTORS, because if we don't, the current implementation
 * will oops sooner or later.
 */
static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
{
	sector_t s;
	switch (bdev->dc.meta_dev_idx) {
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		s = drbd_get_capacity(bdev->backing_bdev)
			? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_md_first_sector(bdev))
			: 0;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
				drbd_get_capacity(bdev->backing_bdev));
		/* clip at maximum size the meta device can support */
		s = min_t(sector_t, s,
			BM_EXT_TO_SECT(bdev->md.md_size_sect
				     - bdev->md.bm_offset));
		break;
	default:
		s = min_t(sector_t, DRBD_MAX_SECTORS,
				drbd_get_capacity(bdev->backing_bdev));
	}
	return s;
}

/**
 * drbd_md_ss__() - Return the sector number of our meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Meta data block device.
 */
static inline sector_t drbd_md_ss__(struct drbd_conf *mdev,
				    struct drbd_backing_dev *bdev)
{
	switch (bdev->dc.meta_dev_idx) {
	default: /* external, some index */
		return MD_RESERVED_SECT * bdev->dc.meta_dev_idx;
	case DRBD_MD_INDEX_INTERNAL:
		/* with drbd08, internal meta data is always "flexible" */
	case DRBD_MD_INDEX_FLEX_INT:
		/* sizeof(struct md_on_disk_07) == 4k
		 * position: last 4k aligned block of 4k size */
		if (!bdev->backing_bdev) {
			if (__ratelimit(&drbd_ratelimit_state)) {
				dev_err(DEV, "bdev->backing_bdev==NULL\n");
				dump_stack();
			}
			return 0;
		}
		return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL)
			- MD_AL_OFFSET;
	case DRBD_MD_INDEX_FLEX_EXT:
		return 0;
	}
}
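
/* Worked example for the internal/flexible case above, assuming
 * MD_AL_OFFSET is 8 sectors (one 4k super block): a backing device of
 * 1000005 sectors is masked down to 1000000 (& ~7ULL), so the super
 * block lands at sector 999992, the start of the last 4k-aligned 4k
 * block of the device.
 */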

static inline void
_drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	list_add_tail(&w->list, &q->q);
	up(&q->s);
}

static inline void
drbd_queue_work_front(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add(&w->list, &q->q);
	up(&q->s); /* within the spinlock,
		      see comment near end of drbd_worker() */
	spin_unlock_irqrestore(&q->q_lock, flags);
}

static inline void
drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
{
	unsigned long flags;
	spin_lock_irqsave(&q->q_lock, flags);
	list_add_tail(&w->list, &q->q);
	up(&q->s); /* within the spinlock,
		      see comment near end of drbd_worker() */
	spin_unlock_irqrestore(&q->q_lock, flags);
}

static inline void wake_asender(struct drbd_conf *mdev)
{
	if (test_bit(SIGNAL_ASENDER, &mdev->flags))
		force_sig(DRBD_SIG, mdev->asender.task);
}

static inline void request_ping(struct drbd_conf *mdev)
{
	set_bit(SEND_PING, &mdev->flags);
	wake_asender(mdev);
}

static inline int drbd_send_short_cmd(struct drbd_conf *mdev,
	enum drbd_packets cmd)
{
	struct p_header h;
	return drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd, &h, sizeof(h));
}

static inline int drbd_send_ping(struct drbd_conf *mdev)
{
	struct p_header h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING, &h, sizeof(h));
}

static inline int drbd_send_ping_ack(struct drbd_conf *mdev)
{
	struct p_header h;
	return drbd_send_cmd(mdev, USE_META_SOCKET, P_PING_ACK, &h, sizeof(h));
}

static inline void drbd_thread_stop(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, FALSE, TRUE);
}

static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, FALSE, FALSE);
}

static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
{
	_drbd_thread_stop(thi, TRUE, FALSE);
}

/* counts how many answer packets we expect from our peer,
 * for either explicit application requests,
 * or implicit barrier packets as necessary.
 * increased:
 *  w_send_barrier
 *  _req_mod(req, queue_for_net_write or queue_for_net_read);
 *    it is much easier and equally valid to count what we queue for the
 *    worker, even before it actually was queued or sent.
 *    (drbd_make_request_common; recovery path on read io-error)
 * decreased:
 *  got_BarrierAck (respective tl_clear, tl_clear_barrier)
 *  _req_mod(req, data_received)
 *     [from receive_DataReply]
 *  _req_mod(req, write_acked_by_peer or recv_acked_by_peer or neg_acked)
 *     [from got_BlockAck (P_WRITE_ACK, P_RECV_ACK)]
 *     for some reason it is NOT decreased in got_NegAck,
 *     but in the resulting cleanup code from report_params.
 *     we should try to remember the reason for that...
 *  _req_mod(req, send_failed or send_canceled)
 *  _req_mod(req, connection_lost_while_pending)
 *     [from tl_clear_barrier]
 */
static inline void inc_ap_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->ap_pending_cnt);
}

#define ERR_IF_CNT_IS_NEGATIVE(which)				\
	if (atomic_read(&mdev->which) < 0)			\
		dev_err(DEV, "in %s:%d: " #which " = %d < 0 !\n",	\
			__func__ , __LINE__ ,			\
			atomic_read(&mdev->which))

#define dec_ap_pending(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	if (atomic_dec_and_test(&mdev->ap_pending_cnt))		\
		wake_up(&mdev->misc_wait);			\
	ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt); } while (0)

/* counts how many resync-related answers we still expect from the peer
 *		     increase			decrease
 * C_SYNC_TARGET sends P_RS_DATA_REQUEST (and expects P_RS_DATA_REPLY)
 * C_SYNC_SOURCE sends P_RS_DATA_REPLY   (and expects P_WRITE_ACK with ID_SYNCER)
 *					   (or P_NEG_ACK with ID_SYNCER)
 */
static inline void inc_rs_pending(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->rs_pending_cnt);
}

#define dec_rs_pending(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->rs_pending_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt); } while (0)

/* counts how many answers we still need to send to the peer.
 * increased on
 *  receive_Data	unless protocol A;
 *			we need to send a P_RECV_ACK (proto B)
 *			or P_WRITE_ACK (proto C)
 *  receive_RSDataReply (recv_resync_read) we need to send a P_WRITE_ACK
 *  receive_DataRequest (receive_RSDataRequest) we need to send back P_DATA
 *  receive_Barrier_*	we need to send a P_BARRIER_ACK
 */
static inline void inc_unacked(struct drbd_conf *mdev)
{
	atomic_inc(&mdev->unacked_cnt);
}

#define dec_unacked(mdev)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_dec(&mdev->unacked_cnt);				\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)

#define sub_unacked(mdev, n)	do {				\
	typecheck(struct drbd_conf *, mdev);			\
	atomic_sub(n, &mdev->unacked_cnt);			\
	ERR_IF_CNT_IS_NEGATIVE(unacked_cnt); } while (0)


static inline void put_net_conf(struct drbd_conf *mdev)
{
	if (atomic_dec_and_test(&mdev->net_cnt))
		wake_up(&mdev->misc_wait);
}

/**
 * get_net_conf() - Increase ref count on mdev->net_conf; Returns 0 if nothing there
 * @mdev:	DRBD device.
 *
 * You have to call put_net_conf() when finished working with mdev->net_conf.
 */
static inline int get_net_conf(struct drbd_conf *mdev)
{
	int have_net_conf;

	atomic_inc(&mdev->net_cnt);
	have_net_conf = mdev->state.conn >= C_UNCONNECTED;
	if (!have_net_conf)
		put_net_conf(mdev);
	return have_net_conf;
}
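
/* Typical get/put pairing (sketch; drbd_get_max_buffers() below uses
 * exactly this pattern):
 *
 *	if (get_net_conf(mdev)) {
 *		... read mdev->net_conf fields ...
 *		put_net_conf(mdev);
 *	}
 */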

/**
 * get_ldev() - Increase the ref count on mdev->ldev. Returns 0 if there is no ldev
 * @M:		DRBD device.
 *
 * You have to call put_ldev() when finished working with mdev->ldev.
 */
#define get_ldev(M) __cond_lock(local, _get_ldev_if_state(M,D_INCONSISTENT))
#define get_ldev_if_state(M,MINS) __cond_lock(local, _get_ldev_if_state(M,MINS))

static inline void put_ldev(struct drbd_conf *mdev)
{
	__release(local);
	if (atomic_dec_and_test(&mdev->local_cnt))
		wake_up(&mdev->misc_wait);
	D_ASSERT(atomic_read(&mdev->local_cnt) >= 0);
}

#ifndef __CHECKER__
static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed)
		put_ldev(mdev);
	return io_allowed;
}
#else
extern int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins);
#endif
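
/* Typical usage (sketch; see drbd_kick_lo() below for this exact shape):
 *
 *	if (get_ldev(mdev)) {
 *		... access mdev->ldev ...
 *		put_ldev(mdev);
 *	}
 */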

/* you must have a "get_ldev" reference */
static inline void drbd_get_syncer_progress(struct drbd_conf *mdev,
		unsigned long *bits_left, unsigned int *per_mil_done)
{
	/*
	 * the typecheck below breaks the build at compile time when we
	 * change that type (we may some day feel that a 4TB maximum
	 * storage per drbd is not enough)
	 */
	typecheck(unsigned long, mdev->rs_total);

	/* note: both rs_total and rs_left are in bits, i.e. in
	 * units of BM_BLOCK_SIZE.
	 * for the percentage, we don't care. */

	*bits_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;
	/* >> 10 to prevent overflow,
	 * +1 to prevent division by zero */
	if (*bits_left > mdev->rs_total) {
		/* doh. maybe a logic bug somewhere.
		 * may also be just a race condition
		 * between this and a disconnect during sync.
		 * for now, just prevent in-kernel buffer overflow.
		 */
		smp_rmb();
		dev_warn(DEV, "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n",
				drbd_conn_str(mdev->state.conn),
				*bits_left, mdev->rs_total, mdev->rs_failed);
		*per_mil_done = 0;
	} else {
		/* make sure the calculation happens in long context */
		unsigned long tmp = 1000UL -
				(*bits_left >> 10)*1000UL
				/ ((mdev->rs_total >> 10) + 1UL);
		*per_mil_done = tmp;
	}
}
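
/* Worked example: with rs_total = 2^20 bits and bits_left = 2^18 bits
 * (a quarter left to sync), the else branch above computes
 * 1000 - (256 * 1000) / (1024 + 1) = 1000 - 249 = 751, i.e. 75.1% done.
 */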


/* this throttles on-the-fly application requests
 * according to max_buffers settings;
 * maybe re-implement using semaphores? */
static inline int drbd_get_max_buffers(struct drbd_conf *mdev)
{
	int mxb = 1000000; /* arbitrary limit on open requests */
	if (get_net_conf(mdev)) {
		mxb = mdev->net_conf->max_buffers;
		put_net_conf(mdev);
	}
	return mxb;
}

static inline int drbd_state_is_stable(union drbd_state s)
{
	/* DO NOT add a default clause, we want the compiler to warn us
	 * for any newly introduced state we may have forgotten to add here */

	switch ((enum drbd_conns)s.conn) {
	/* new io only accepted when there is no connection, ... */
	case C_STANDALONE:
	case C_WF_CONNECTION:
	/* ... or there is a well established connection. */
	case C_CONNECTED:
	case C_SYNC_SOURCE:
	case C_SYNC_TARGET:
	case C_VERIFY_S:
	case C_VERIFY_T:
	case C_PAUSED_SYNC_S:
	case C_PAUSED_SYNC_T:
		/* maybe stable, look at the disk state */
		break;

	/* no new io accepted during transitional states
	 * like handshake or teardown */
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_REPORT_PARAMS:
	case C_STARTING_SYNC_S:
	case C_STARTING_SYNC_T:
	case C_WF_BITMAP_S:
	case C_WF_BITMAP_T:
	case C_WF_SYNC_UUID:
	case C_MASK:
		/* not "stable" */
		return 0;
	}

	switch ((enum drbd_disk_state)s.disk) {
	case D_DISKLESS:
	case D_INCONSISTENT:
	case D_OUTDATED:
	case D_CONSISTENT:
	case D_UP_TO_DATE:
		/* disk state is stable as well. */
		break;

	/* no new io accepted during transitional states */
	case D_ATTACHING:
	case D_FAILED:
	case D_NEGOTIATING:
	case D_UNKNOWN:
	case D_MASK:
		/* not "stable" */
		return 0;
	}

	return 1;
}

static inline int __inc_ap_bio_cond(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);

	if (mdev->state.susp)
		return 0;
	if (test_bit(SUSPEND_IO, &mdev->flags))
		return 0;

	/* to avoid potential deadlock or bitmap corruption,
	 * in various places, we only allow new application io
	 * to start during "stable" states. */

	/* no new io accepted when attaching or detaching the disk */
	if (!drbd_state_is_stable(mdev->state))
		return 0;

	/* since some older kernels don't have atomic_add_unless,
	 * and we are within the spinlock anyways, we have this workaround. */
	if (atomic_read(&mdev->ap_bio_cnt) > mxb)
		return 0;
	if (test_bit(BITMAP_IO, &mdev->flags))
		return 0;
	return 1;
}

/* I'd like to use wait_event_lock_irq,
 * but I'm not sure when it got introduced,
 * and not sure when it has 3 or 4 arguments */
static inline void inc_ap_bio(struct drbd_conf *mdev, int one_or_two)
{
	/* compare with after_state_ch,
	 * os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S */
	DEFINE_WAIT(wait);

	/* we wait here
	 *    as long as the device is suspended,
	 *    while the bitmap is on the fly during connection handshake,
	 *    and as long as we would exceed the max_buffers limit.
	 *
	 * to avoid races with the reconnect code,
	 * we need to atomic_inc within the spinlock. */

	spin_lock_irq(&mdev->req_lock);
	while (!__inc_ap_bio_cond(mdev)) {
		prepare_to_wait(&mdev->misc_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		schedule();
		finish_wait(&mdev->misc_wait, &wait);
		spin_lock_irq(&mdev->req_lock);
	}
	atomic_add(one_or_two, &mdev->ap_bio_cnt);
	spin_unlock_irq(&mdev->req_lock);
}

static inline void dec_ap_bio(struct drbd_conf *mdev)
{
	int mxb = drbd_get_max_buffers(mdev);
	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);

	D_ASSERT(ap_bio >= 0);
	/* this currently does wake_up for every dec_ap_bio!
	 * maybe rather introduce some type of hysteresis?
	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
	if (ap_bio < mxb)
		wake_up(&mdev->misc_wait);
	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
	}
}

static inline void drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
{
	mdev->ed_uuid = val;
}

static inline int seq_cmp(u32 a, u32 b)
{
	/* we assume wrap around at 32bit.
	 * for wrap around at 24bit (old atomic_t),
	 * we'd have to
	 *  a <<= 8; b <<= 8;
	 */
	return (s32)(a) - (s32)(b);
}
#define seq_lt(a, b) (seq_cmp((a), (b)) < 0)
#define seq_gt(a, b) (seq_cmp((a), (b)) > 0)
#define seq_ge(a, b) (seq_cmp((a), (b)) >= 0)
#define seq_le(a, b) (seq_cmp((a), (b)) <= 0)
/* CAUTION: please no side effects in arguments! */
#define seq_max(a, b) ((u32)(seq_gt((a), (b)) ? (a) : (b)))
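
/* Example: the comparison is wrap-around safe. With a = 0x00000001 and
 * b = 0xfffffffe, seq_cmp(a, b) = 1 - (-2) = 3, so seq_gt(a, b) holds,
 * even though a < b as plain unsigned numbers.
 */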

static inline void update_peer_seq(struct drbd_conf *mdev, unsigned int new_seq)
{
	unsigned int m;
	spin_lock(&mdev->peer_seq_lock);
	m = seq_max(mdev->peer_seq, new_seq);
	mdev->peer_seq = m;
	spin_unlock(&mdev->peer_seq_lock);
	if (m == new_seq)
		wake_up(&mdev->seq_wait);
}

static inline void drbd_update_congested(struct drbd_conf *mdev)
{
	struct sock *sk = mdev->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &mdev->flags);
}

static inline int drbd_queue_order_type(struct drbd_conf *mdev)
{
	/* sorry, we currently have no working implementation
	 * of distributed TCQ stuff */
#ifndef QUEUE_ORDERED_NONE
#define QUEUE_ORDERED_NONE 0
#endif
	return QUEUE_ORDERED_NONE;
}

static inline void drbd_blk_run_queue(struct request_queue *q)
{
	if (q && q->unplug_fn)
		q->unplug_fn(q);
}

static inline void drbd_kick_lo(struct drbd_conf *mdev)
{
	if (get_ldev(mdev)) {
		drbd_blk_run_queue(bdev_get_queue(mdev->ldev->backing_bdev));
		put_ldev(mdev);
	}
}

static inline void drbd_md_flush(struct drbd_conf *mdev)
{
	int r;

	if (test_bit(MD_NO_BARRIER, &mdev->flags))
		return;

	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL,
			       BLKDEV_IFL_WAIT);
	if (r) {
		set_bit(MD_NO_BARRIER, &mdev->flags);
		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
	}
}

#endif