The DRBD driver
linux-2.6-block.git / drivers/block/drbd/drbd_main.c
/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/autoconf.h>
#include <linux/module.h>
#include <linux/version.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_tracing.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

struct after_state_chg_work {
        struct drbd_work w;
        union drbd_state os;
        union drbd_state ns;
        enum chg_state_flags flags;
        struct completion *done;
};

int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                           union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);

DEFINE_TRACE(drbd_unplug);
DEFINE_TRACE(drbd_uuid);
DEFINE_TRACE(drbd_ee);
DEFINE_TRACE(drbd_packet);
DEFINE_TRACE(drbd_md_io);
DEFINE_TRACE(drbd_epoch);
DEFINE_TRACE(drbd_netlink);
DEFINE_TRACE(drbd_actlog);
DEFINE_TRACE(drbd_bio);
DEFINE_TRACE(_drbd_resync);
DEFINE_TRACE(drbd_req);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
              "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not as a module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);

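/* Illustration (editor's note, not part of the original source): with DRBD
 * built as a module the parameters above are set at load time, e.g.
 *
 *     modprobe drbd minor_count=64 allow_oos=0
 *
 * while, as the comment above explains, a built-in DRBD gets them as boot
 * parameters, e.g. "drbd.minor_count=64" on the kernel command line. */
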
#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameters, defined */
unsigned int minor_count = 32;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details;       /* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct drbd_conf **minor_table;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;       /* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;   /* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;   /* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list; the next pointer is stored in the
         private member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
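
/* Illustration (editor's note, not part of the original source): the
 * ratelimit state above allows bursts of up to 5 messages per 5*HZ
 * interval. A typical, hypothetical use:
 *
 *     if (__ratelimit(&drbd_ratelimit_state))
 *             dev_err(DEV, "this would otherwise spam the log\n");
 */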

static struct block_device_operations drbd_ops = {
        .owner =   THIS_MODULE,
        .open =    drbd_open,
        .release = drbd_release,
};

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
        int io_allowed;

        atomic_inc(&mdev->local_cnt);
        io_allowed = (mdev->state.disk >= mins);
        if (!io_allowed) {
                if (atomic_dec_and_test(&mdev->local_cnt))
                        wake_up(&mdev->misc_wait);
        }
        return io_allowed;
}

#endif

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
 * attached.
 */
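
/*
 * Illustration (editor's sketch, not part of the original source), derived
 * from the DOC comment above and the code below:
 *
 *   oldest_tle                                       newest_tle
 *       |                                                 |
 *       v                                                 v
 *   [epoch] --next--> [epoch] --next--> ... --> [epoch] --next--> NULL
 *      |
 *      +-- requests: circular doubly linked list of struct drbd_request
 *
 * _tl_add_barrier() appends at the newest end; tl_release() frees or
 * recycles at the oldest end.
 */
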
static int tl_init(struct drbd_conf *mdev)
{
        struct drbd_tl_epoch *b;

        /* during device minor initialization, we may well use GFP_KERNEL */
        b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
        if (!b)
                return 0;
        INIT_LIST_HEAD(&b->requests);
        INIT_LIST_HEAD(&b->w.list);
        b->next = NULL;
        b->br_number = 4711;
        b->n_req = 0;
        b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

        mdev->oldest_tle = b;
        mdev->newest_tle = b;
        INIT_LIST_HEAD(&mdev->out_of_sequence_requests);

        mdev->tl_hash = NULL;
        mdev->tl_hash_s = 0;

        return 1;
}

static void tl_cleanup(struct drbd_conf *mdev)
{
        D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
        D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
        kfree(mdev->oldest_tle);
        mdev->oldest_tle = NULL;
        kfree(mdev->unused_spare_tle);
        mdev->unused_spare_tle = NULL;
        kfree(mdev->tl_hash);
        mdev->tl_hash = NULL;
        mdev->tl_hash_s = 0;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:       DRBD device.
 * @new:        Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
        struct drbd_tl_epoch *newest_before;

        INIT_LIST_HEAD(&new->requests);
        INIT_LIST_HEAD(&new->w.list);
        new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
        new->next = NULL;
        new->n_req = 0;

        newest_before = mdev->newest_tle;
        /* never send a barrier number == 0, because that is special-cased
         * when using TCQ for our write ordering code */
        new->br_number = (newest_before->br_number+1) ?: 1;
        if (mdev->newest_tle != new) {
                mdev->newest_tle->next = new;
                mdev->newest_tle = new;
        }
}
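
/*
 * Illustration (editor's note, not part of the original source): the GNU C
 * "x ?: y" shorthand above evaluates to x unless x is 0. So when the 32-bit
 * barrier number wraps around,
 *
 *     (0xffffffffU + 1) == 0   =>   new->br_number = 1
 *
 * and the special value 0 is never sent, as the comment above requires.
 */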

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:       DRBD device.
 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
 * @set_size:   Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch object, this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
                       unsigned int set_size)
{
        struct drbd_tl_epoch *b, *nob; /* next old barrier */
        struct list_head *le, *tle;
        struct drbd_request *r;

        spin_lock_irq(&mdev->req_lock);

        b = mdev->oldest_tle;

        /* first some paranoia code */
        if (b == NULL) {
                dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
                        barrier_nr);
                goto bail;
        }
        if (b->br_number != barrier_nr) {
                dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
                        barrier_nr, b->br_number);
                goto bail;
        }
        if (b->n_req != set_size) {
                dev_err(DEV, "BAD! BarrierAck #%u received with n_req=%u, expected n_req=%u!\n",
                        barrier_nr, set_size, b->n_req);
                goto bail;
        }

        /* Clean up list of requests processed during current epoch */
        list_for_each_safe(le, tle, &b->requests) {
                r = list_entry(le, struct drbd_request, tl_requests);
                _req_mod(r, barrier_acked);
        }
        /* There could be requests on the list waiting for completion
           of the write to the local disk. To avoid corruption of the
           slab's data structures we have to remove the list's head.

           Also there could have been a barrier ack out of sequence, overtaking
           the write acks - which would be a bug and violate write ordering.
           To not deadlock in case we lose connection while such requests are
           still pending, we need some way to find them for the
           _req_mod(connection_lost_while_pending).

           These have been list_move'd to the out_of_sequence_requests list in
           _req_mod(, barrier_acked) above.
           */
        list_del_init(&b->requests);

        nob = b->next;
        if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
                _tl_add_barrier(mdev, b);
                if (nob)
                        mdev->oldest_tle = nob;
                /* if nob == NULL b was the only barrier, and becomes the new
                   barrier. Therefore mdev->oldest_tle already points to b */
        } else {
                D_ASSERT(nob != NULL);
                mdev->oldest_tle = nob;
                kfree(b);
        }

        spin_unlock_irq(&mdev->req_lock);
        dec_ap_pending(mdev);

        return;

bail:
        spin_unlock_irq(&mdev->req_lock);
        drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}


/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:       DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
{
        struct drbd_tl_epoch *b, *tmp;
        struct list_head *le, *tle;
        struct drbd_request *r;
        int new_initial_bnr = net_random();

        spin_lock_irq(&mdev->req_lock);

        b = mdev->oldest_tle;
        while (b) {
                list_for_each_safe(le, tle, &b->requests) {
                        r = list_entry(le, struct drbd_request, tl_requests);
                        /* It would be nice to complete outside of spinlock.
                         * But this is easier for now. */
                        _req_mod(r, connection_lost_while_pending);
                }
                tmp = b->next;

                /* there could still be requests on that ring list,
                 * in case local io is still pending */
                list_del(&b->requests);

                /* dec_ap_pending corresponding to queue_barrier.
                 * the newest barrier may not have been queued yet,
                 * in which case w.cb is still NULL. */
                if (b->w.cb != NULL)
                        dec_ap_pending(mdev);

                if (b == mdev->newest_tle) {
                        /* recycle, but reinit! */
                        D_ASSERT(tmp == NULL);
                        INIT_LIST_HEAD(&b->requests);
                        INIT_LIST_HEAD(&b->w.list);
                        b->w.cb = NULL;
                        b->br_number = new_initial_bnr;
                        b->n_req = 0;

                        mdev->oldest_tle = b;
                        break;
                }
                kfree(b);
                b = tmp;
        }

        /* we expect this list to be empty. */
        D_ASSERT(list_empty(&mdev->out_of_sequence_requests));

        /* but just in case, clean it up anyway! */
        list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
                r = list_entry(le, struct drbd_request, tl_requests);
                /* It would be nice to complete outside of spinlock.
                 * But this is easier for now. */
                _req_mod(r, connection_lost_while_pending);
        }

        /* ensure bit indicating barrier is required is clear */
        clear_bit(CREATE_BARRIER, &mdev->flags);

        spin_unlock_irq(&mdev->req_lock);
}

/**
 * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
 * @mdev:       DRBD device.
 * @os:         old (current) state.
 * @ns:         new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
                          union drbd_state os, union drbd_state ns)
{
        return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
                 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
                  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
                  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
                  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
                (os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
                (os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}

int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
                      union drbd_state mask, union drbd_state val)
{
        unsigned long flags;
        union drbd_state os, ns;
        int rv;

        spin_lock_irqsave(&mdev->req_lock, flags);
        os = mdev->state;
        ns.i = (os.i & ~mask.i) | val.i;
        rv = _drbd_set_state(mdev, ns, f, NULL);
        ns = mdev->state;
        spin_unlock_irqrestore(&mdev->req_lock, flags);

        return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
        union drbd_state mask, union drbd_state val)
{
        drbd_change_state(mdev, CS_HARD, mask, val);
}

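/*
 * Illustration (editor's note, not part of the original source): mask/val
 * select which fields of the state union change. A request built with the
 * NS() helper used elsewhere in this file, e.g. NS(conn, C_DISCONNECTING),
 * has all bits of the conn field set in mask.i and the new value in val.i,
 * so
 *
 *     ns.i = (os.i & ~mask.i) | val.i;
 *
 * rewrites only the connection state and leaves role, disk, etc. untouched.
 */
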
static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
static int is_valid_state_transition(struct drbd_conf *,
                                     union drbd_state, union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
                                       union drbd_state ns, int *warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
                        union drbd_state, union drbd_state);

static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
                                    union drbd_state mask, union drbd_state val)
{
        union drbd_state os, ns;
        unsigned long flags;
        int rv;

        if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
                return SS_CW_SUCCESS;

        if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
                return SS_CW_FAILED_BY_PEER;

        rv = 0;
        spin_lock_irqsave(&mdev->req_lock, flags);
        os = mdev->state;
        ns.i = (os.i & ~mask.i) | val.i;
        ns = sanitize_state(mdev, os, ns, NULL);

        if (!cl_wide_st_chg(mdev, os, ns))
                rv = SS_CW_NO_NEED;
        if (!rv) {
                rv = is_valid_state(mdev, ns);
                if (rv == SS_SUCCESS) {
                        rv = is_valid_state_transition(mdev, ns, os);
                        if (rv == SS_SUCCESS)
                                rv = 0; /* cont waiting, otherwise fail. */
                }
        }
        spin_unlock_irqrestore(&mdev->req_lock, flags);

        return rv;
}

/**
 * drbd_req_state() - Perform a possibly cluster-wide state change
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 * @f:          flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static int drbd_req_state(struct drbd_conf *mdev,
                          union drbd_state mask, union drbd_state val,
                          enum chg_state_flags f)
{
        struct completion done;
        unsigned long flags;
        union drbd_state os, ns;
        int rv;

        init_completion(&done);

        if (f & CS_SERIALIZE)
                mutex_lock(&mdev->state_mutex);

        spin_lock_irqsave(&mdev->req_lock, flags);
        os = mdev->state;
        ns.i = (os.i & ~mask.i) | val.i;
        ns = sanitize_state(mdev, os, ns, NULL);

        if (cl_wide_st_chg(mdev, os, ns)) {
                rv = is_valid_state(mdev, ns);
                if (rv == SS_SUCCESS)
                        rv = is_valid_state_transition(mdev, ns, os);
                spin_unlock_irqrestore(&mdev->req_lock, flags);

                if (rv < SS_SUCCESS) {
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }

                drbd_state_lock(mdev);
                if (!drbd_send_state_req(mdev, mask, val)) {
                        drbd_state_unlock(mdev);
                        rv = SS_CW_FAILED_BY_PEER;
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }

                wait_event(mdev->state_wait,
                        (rv = _req_st_cond(mdev, mask, val)));

                if (rv < SS_SUCCESS) {
                        drbd_state_unlock(mdev);
                        if (f & CS_VERBOSE)
                                print_st_err(mdev, os, ns, rv);
                        goto abort;
                }
                spin_lock_irqsave(&mdev->req_lock, flags);
                os = mdev->state;
                ns.i = (os.i & ~mask.i) | val.i;
                rv = _drbd_set_state(mdev, ns, f, &done);
                drbd_state_unlock(mdev);
        } else {
                rv = _drbd_set_state(mdev, ns, f, &done);
        }

        spin_unlock_irqrestore(&mdev->req_lock, flags);

        if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
                D_ASSERT(current != mdev->worker.task);
                wait_for_completion(&done);
        }

abort:
        if (f & CS_SERIALIZE)
                mutex_unlock(&mdev->state_mutex);

        return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:       DRBD device.
 * @mask:       mask of state bits to change.
 * @val:        value of new state bits.
 * @f:          flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
                        union drbd_state val,   enum chg_state_flags f)
{
        int rv;

        wait_event(mdev->state_wait,
                   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

        return rv;
}

static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
        dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
            name,
            drbd_conn_str(ns.conn),
            drbd_role_str(ns.role),
            drbd_role_str(ns.peer),
            drbd_disk_str(ns.disk),
            drbd_disk_str(ns.pdsk),
            ns.susp ? 's' : 'r',
            ns.aftr_isp ? 'a' : '-',
            ns.peer_isp ? 'p' : '-',
            ns.user_isp ? 'u' : '-'
            );
}

void print_st_err(struct drbd_conf *mdev,
        union drbd_state os, union drbd_state ns, int err)
{
        if (err == SS_IN_TRANSIENT_STATE)
                return;
        dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
        print_st(mdev, " state", os);
        print_st(mdev, "wanted", ns);
}


#define drbd_peer_str drbd_role_str
#define drbd_pdsk_str drbd_disk_str

#define drbd_susp_str(A)     ((A) ? "1" : "0")
#define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
#define drbd_peer_isp_str(A) ((A) ? "1" : "0")
#define drbd_user_isp_str(A) ((A) ? "1" : "0")

#define PSC(A) \
        ({ if (ns.A != os.A) { \
                pbp += sprintf(pbp, #A "( %s -> %s ) ", \
                              drbd_##A##_str(os.A), \
                              drbd_##A##_str(ns.A)); \
        } })

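/*
 * Illustration (editor's note, not part of the original source): PSC(role)
 * expands roughly to
 *
 *     if (ns.role != os.role)
 *             pbp += sprintf(pbp, "role( %s -> %s ) ",
 *                            drbd_role_str(os.role), drbd_role_str(ns.role));
 *
 * so __drbd_set_state() below logs only the fields that actually changed,
 * e.g. "role( Secondary -> Primary )".
 */
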
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:       DRBD device.
 * @ns:         State to consider.
 */
static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
        /* See drbd_state_sw_errors in drbd_strings.c */

        enum drbd_fencing_p fp;
        int rv = SS_SUCCESS;

        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
        }

        if (get_net_conf(mdev)) {
                if (!mdev->net_conf->two_primaries &&
                    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
                        rv = SS_TWO_PRIMARIES;
                put_net_conf(mdev);
        }

        if (rv <= 0)
                /* already found a reason to abort */;
        else if (ns.role == R_SECONDARY && mdev->open_cnt)
                rv = SS_DEVICE_IN_USE;

        else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if (fp >= FP_RESOURCE &&
                 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
                rv = SS_PRIMARY_NOP;

        else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
                rv = SS_NO_UP_TO_DATE_DISK;

        else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
                rv = SS_NO_LOCAL_DISK;

        else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
                rv = SS_NO_REMOTE_DISK;

        else if ((ns.conn == C_CONNECTED ||
                  ns.conn == C_WF_BITMAP_S ||
                  ns.conn == C_SYNC_SOURCE ||
                  ns.conn == C_PAUSED_SYNC_S) &&
                  ns.disk == D_OUTDATED)
                rv = SS_CONNECTED_OUTDATES;

        else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
                 (mdev->sync_conf.verify_alg[0] == 0))
                rv = SS_NO_VERIFY_ALG;

        else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
                  mdev->agreed_pro_version < 88)
                rv = SS_NOT_SUPPORTED;

        return rv;
}

/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:       DRBD device.
 * @ns:         new state.
 * @os:         old state.
 */
static int is_valid_state_transition(struct drbd_conf *mdev,
                                     union drbd_state ns, union drbd_state os)
{
        int rv = SS_SUCCESS;

        if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
            os.conn > C_CONNECTED)
                rv = SS_RESYNC_RUNNING;

        if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
                rv = SS_ALREADY_STANDALONE;

        if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
                rv = SS_IS_DISKLESS;

        if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
                rv = SS_NO_NET_CONFIG;

        if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
                rv = SS_LOWER_THAN_OUTDATED;

        if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
                rv = SS_IN_TRANSIENT_STATE;

        if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
                rv = SS_IN_TRANSIENT_STATE;

        if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;

        if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
            ns.conn != os.conn && os.conn > C_CONNECTED)
                rv = SS_RESYNC_RUNNING;

        if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
            os.conn < C_CONNECTED)
                rv = SS_NEED_CONNECTION;

        return rv;
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:       DRBD device.
 * @os:         old state.
 * @ns:         new state.
 * @warn_sync_abort:
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
                                       union drbd_state ns, int *warn_sync_abort)
{
        enum drbd_fencing_p fp;

        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
        }

        /* Disallow Network errors to configure a device's network part */
        if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
            os.conn <= C_DISCONNECTING)
                ns.conn = os.conn;

        /* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow */
        if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
            ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING)
                ns.conn = os.conn;

        /* After C_DISCONNECTING only C_STANDALONE may follow */
        if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
                ns.conn = os.conn;

        if (ns.conn < C_CONNECTED) {
                ns.peer_isp = 0;
                ns.peer = R_UNKNOWN;
                if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
                        ns.pdsk = D_UNKNOWN;
        }

        /* Clear the aftr_isp when becoming unconfigured */
        if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
                ns.aftr_isp = 0;

        if (ns.conn <= C_DISCONNECTING && ns.disk == D_DISKLESS)
                ns.pdsk = D_UNKNOWN;

        /* Abort resync if a disk fails/detaches */
        if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
            (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
                if (warn_sync_abort)
                        *warn_sync_abort = 1;
                ns.conn = C_CONNECTED;
        }

        if (ns.conn >= C_CONNECTED &&
            ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
             (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
                switch (ns.conn) {
                case C_WF_BITMAP_T:
                case C_PAUSED_SYNC_T:
                        ns.disk = D_OUTDATED;
                        break;
                case C_CONNECTED:
                case C_WF_BITMAP_S:
                case C_SYNC_SOURCE:
                case C_PAUSED_SYNC_S:
                        ns.disk = D_UP_TO_DATE;
                        break;
                case C_SYNC_TARGET:
                        ns.disk = D_INCONSISTENT;
                        dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
                        break;
                }
                if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
                        dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
        }

        if (ns.conn >= C_CONNECTED &&
            (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
                switch (ns.conn) {
                case C_CONNECTED:
                case C_WF_BITMAP_T:
                case C_PAUSED_SYNC_T:
                case C_SYNC_TARGET:
                        ns.pdsk = D_UP_TO_DATE;
                        break;
                case C_WF_BITMAP_S:
                case C_PAUSED_SYNC_S:
                        ns.pdsk = D_OUTDATED;
                        break;
                case C_SYNC_SOURCE:
                        ns.pdsk = D_INCONSISTENT;
                        dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
                        break;
                }
                if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
                        dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
        }

        /* Connection breaks down before we finished "Negotiating" */
        if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
            get_ldev_if_state(mdev, D_NEGOTIATING)) {
                if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
                        ns.disk = mdev->new_state_tmp.disk;
                        ns.pdsk = mdev->new_state_tmp.pdsk;
                } else {
                        dev_alert(DEV, "Connection lost while negotiating, no data!\n");
                        ns.disk = D_DISKLESS;
                        ns.pdsk = D_UNKNOWN;
                }
                put_ldev(mdev);
        }

        if (fp == FP_STONITH &&
            (ns.role == R_PRIMARY &&
             ns.conn < C_CONNECTED &&
             ns.pdsk > D_OUTDATED))
                        ns.susp = 1;

        if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
                if (ns.conn == C_SYNC_SOURCE)
                        ns.conn = C_PAUSED_SYNC_S;
                if (ns.conn == C_SYNC_TARGET)
                        ns.conn = C_PAUSED_SYNC_T;
        } else {
                if (ns.conn == C_PAUSED_SYNC_S)
                        ns.conn = C_SYNC_SOURCE;
                if (ns.conn == C_PAUSED_SYNC_T)
                        ns.conn = C_SYNC_TARGET;
        }

        return ns;
}

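/*
 * Illustration (editor's worked example, not part of the original source):
 * suppose the peer vanishes while connected, os = { conn:C_CONNECTED,
 * peer:R_PRIMARY, pdsk:D_UP_TO_DATE } and the caller requests
 * ns.conn = C_TIMEOUT. None of the rules above reset ns.conn here, but
 * because ns.conn < C_CONNECTED, sanitize_state() forces
 * ns.peer = R_UNKNOWN and ns.pdsk = D_UNKNOWN.
 */
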
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
        if (cs == C_VERIFY_T) {
                /* starting online verify from an arbitrary position
                 * does not fit well into the existing protocol.
                 * on C_VERIFY_T, we initialize ov_left and friends
                 * implicitly in receive_DataRequest once the
                 * first P_OV_REQUEST is received */
                mdev->ov_start_sector = ~(sector_t)0;
        } else {
                unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
                if (bit >= mdev->rs_total)
                        mdev->ov_start_sector =
                                BM_BIT_TO_SECT(mdev->rs_total - 1);
                mdev->ov_position = mdev->ov_start_sector;
        }
}

/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:       DRBD device.
 * @ns:         new state.
 * @flags:      Flags
 * @done:       Optional completion that will be completed after after_state_ch() has finished
 *
 * Caller needs to hold req_lock and global_state_lock. Do not call directly.
 */
int __drbd_set_state(struct drbd_conf *mdev,
                    union drbd_state ns, enum chg_state_flags flags,
                    struct completion *done)
{
        union drbd_state os;
        int rv = SS_SUCCESS;
        int warn_sync_abort = 0;
        struct after_state_chg_work *ascw;

        os = mdev->state;

        ns = sanitize_state(mdev, os, ns, &warn_sync_abort);

        if (ns.i == os.i)
                return SS_NOTHING_TO_DO;

        if (!(flags & CS_HARD)) {
                /*  pre-state-change checks ; only look at ns  */
                /* See drbd_state_sw_errors in drbd_strings.c */

                rv = is_valid_state(mdev, ns);
                if (rv < SS_SUCCESS) {
                        /* If the old state was illegal as well, then let
                           this happen...*/

                        if (is_valid_state(mdev, os) == rv) {
                                dev_err(DEV, "Considering state change from bad state. "
                                    "Error would be: '%s'\n",
                                    drbd_set_st_err_str(rv));
                                print_st(mdev, "old", os);
                                print_st(mdev, "new", ns);
                                rv = is_valid_state_transition(mdev, ns, os);
                        }
                } else
                        rv = is_valid_state_transition(mdev, ns, os);
        }

        if (rv < SS_SUCCESS) {
                if (flags & CS_VERBOSE)
                        print_st_err(mdev, os, ns, rv);
                return rv;
        }

        if (warn_sync_abort)
                dev_warn(DEV, "Resync aborted.\n");

        {
                char *pbp, pb[300];
                pbp = pb;
                *pbp = 0;
                PSC(role);
                PSC(peer);
                PSC(conn);
                PSC(disk);
                PSC(pdsk);
                PSC(susp);
                PSC(aftr_isp);
                PSC(peer_isp);
                PSC(user_isp);
                dev_info(DEV, "%s\n", pb);
        }

        /* solve the race between becoming unconfigured,
         * worker doing the cleanup, and
         * admin reconfiguring us:
         * on (re)configure, first set CONFIG_PENDING,
         * then wait for a potentially exiting worker,
         * start the worker, and schedule one no_op.
         * then proceed with configuration.
         */
        if (ns.disk == D_DISKLESS &&
            ns.conn == C_STANDALONE &&
            ns.role == R_SECONDARY &&
            !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
                set_bit(DEVICE_DYING, &mdev->flags);

        mdev->state.i = ns.i;
        wake_up(&mdev->misc_wait);
        wake_up(&mdev->state_wait);

        /*   post-state-change actions   */
        if (os.conn >= C_SYNC_SOURCE   && ns.conn <= C_CONNECTED) {
                set_bit(STOP_SYNC_TIMER, &mdev->flags);
                mod_timer(&mdev->resync_timer, jiffies);
        }

        /* aborted verify run. log the last position */
        if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
            ns.conn < C_CONNECTED) {
                mdev->ov_start_sector =
                        BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
                dev_info(DEV, "Online Verify reached sector %llu\n",
                        (unsigned long long)mdev->ov_start_sector);
        }

        if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
            (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
                dev_info(DEV, "Syncer continues.\n");
                mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
                if (ns.conn == C_SYNC_TARGET) {
                        if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
                                mod_timer(&mdev->resync_timer, jiffies);
                        /* This if (!test_bit) is only needed for the case
                           that a device that has ceased to use its timer,
                           i.e. it is already in drbd_resync_finished(), gets
                           paused and resumed. */
                }
        }

        if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
            (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
                dev_info(DEV, "Resync suspended\n");
                mdev->rs_mark_time = jiffies;
                if (ns.conn == C_PAUSED_SYNC_T)
                        set_bit(STOP_SYNC_TIMER, &mdev->flags);
        }

        if (os.conn == C_CONNECTED &&
            (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
                mdev->ov_position = 0;
                mdev->rs_total =
                mdev->rs_mark_left = drbd_bm_bits(mdev);
                if (mdev->agreed_pro_version >= 90)
                        set_ov_position(mdev, ns.conn);
                else
                        mdev->ov_start_sector = 0;
                mdev->ov_left = mdev->rs_total
                              - BM_SECT_TO_BIT(mdev->ov_position);
                mdev->rs_start     =
                mdev->rs_mark_time = jiffies;
                mdev->ov_last_oos_size = 0;
                mdev->ov_last_oos_start = 0;

                if (ns.conn == C_VERIFY_S) {
                        dev_info(DEV, "Starting Online Verify from sector %llu\n",
                                        (unsigned long long)mdev->ov_position);
                        mod_timer(&mdev->resync_timer, jiffies);
                }
        }

        if (get_ldev(mdev)) {
                u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
                                                 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
                                                 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

                if (test_bit(CRASHED_PRIMARY, &mdev->flags))
                        mdf |= MDF_CRASHED_PRIMARY;
                if (mdev->state.role == R_PRIMARY ||
                    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
                        mdf |= MDF_PRIMARY_IND;
                if (mdev->state.conn > C_WF_REPORT_PARAMS)
                        mdf |= MDF_CONNECTED_IND;
                if (mdev->state.disk > D_INCONSISTENT)
                        mdf |= MDF_CONSISTENT;
                if (mdev->state.disk > D_OUTDATED)
                        mdf |= MDF_WAS_UP_TO_DATE;
                if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
                        mdf |= MDF_PEER_OUT_DATED;
                if (mdf != mdev->ldev->md.flags) {
                        mdev->ldev->md.flags = mdf;
                        drbd_md_mark_dirty(mdev);
                }
                if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
                        drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
                put_ldev(mdev);
        }

        /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
        if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
            os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
                set_bit(CONSIDER_RESYNC, &mdev->flags);

        /* Receiver should clean up itself */
        if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
                drbd_thread_stop_nowait(&mdev->receiver);

        /* Now the receiver finished cleaning up itself, it should die */
        if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
                drbd_thread_stop_nowait(&mdev->receiver);

        /* Upon network failure, we need to restart the receiver. */
        if (os.conn > C_TEAR_DOWN &&
            ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
                drbd_thread_restart_nowait(&mdev->receiver);

        ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
        if (ascw) {
                ascw->os = os;
                ascw->ns = ns;
                ascw->flags = flags;
                ascw->w.cb = w_after_state_ch;
                ascw->done = done;
                drbd_queue_work(&mdev->data.work, &ascw->w);
        } else {
                dev_warn(DEV, "Could not kmalloc an ascw\n");
        }

        return rv;
}

static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
        struct after_state_chg_work *ascw =
                container_of(w, struct after_state_chg_work, w);
        after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
        if (ascw->flags & CS_WAIT_COMPLETE) {
                D_ASSERT(ascw->done != NULL);
                complete(ascw->done);
        }
        kfree(ascw);

        return 1;
}

static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
        if (rv) {
                dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
                _drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
                return;
        }

        switch (mdev->state.conn) {
        case C_STARTING_SYNC_T:
                _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
                break;
        case C_STARTING_SYNC_S:
                drbd_start_resync(mdev, C_SYNC_SOURCE);
                break;
        }
}

/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:       DRBD device.
 * @os:         old state.
 * @ns:         new state.
 * @flags:      Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                           union drbd_state ns, enum chg_state_flags flags)
{
        enum drbd_fencing_p fp;

        if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
                if (mdev->p_uuid)
                        mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
        }

        fp = FP_DONT_CARE;
        if (get_ldev(mdev)) {
                fp = mdev->ldev->dc.fencing;
                put_ldev(mdev);
        }

        /* Inform userspace about the change... */
        drbd_bcast_state(mdev, ns);

        if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
            (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
                drbd_khelper(mdev, "pri-on-incon-degr");

        /* Here we have the actions that are performed after a
           state change. This function might sleep */

        if (fp == FP_STONITH && ns.susp) {
                /* case1: The outdate peer handler is successful:
                 * case2: The connection was established again: */
                if ((os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) ||
                    (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)) {
                        tl_clear(mdev);
                        spin_lock_irq(&mdev->req_lock);
                        _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
                        spin_unlock_irq(&mdev->req_lock);
                }
        }
        /* Do not change the order of the if above and the two below... */
        if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
                drbd_send_uuids(mdev);
                drbd_send_state(mdev);
        }
        if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
                drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");

        /* Lost contact to peer's copy of the data */
        if ((os.pdsk >= D_INCONSISTENT &&
             os.pdsk != D_UNKNOWN &&
             os.pdsk != D_OUTDATED)
        &&  (ns.pdsk < D_INCONSISTENT ||
             ns.pdsk == D_UNKNOWN ||
             ns.pdsk == D_OUTDATED)) {
                kfree(mdev->p_uuid);
                mdev->p_uuid = NULL;
                if (get_ldev(mdev)) {
                        if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
                            mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
                                drbd_uuid_new_current(mdev);
                                drbd_send_uuids(mdev);
                        }
                        put_ldev(mdev);
                }
        }

        if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
                if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0)
                        drbd_uuid_new_current(mdev);

                /* D_DISKLESS Peer becomes secondary */
                if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
                        drbd_al_to_on_disk_bm(mdev);
                put_ldev(mdev);
        }

        /* Last part of the attaching process ... */
        if (ns.conn >= C_CONNECTED &&
            os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
                kfree(mdev->p_uuid); /* We expect to receive up-to-date UUIDs soon. */
                mdev->p_uuid = NULL; /* ...to not use the old ones in the mean time */
                drbd_send_sizes(mdev, 0);  /* to start sync... */
                drbd_send_uuids(mdev);
                drbd_send_state(mdev);
        }

        /* We want to pause/continue resync, tell peer. */
        if (ns.conn >= C_CONNECTED &&
             ((os.aftr_isp != ns.aftr_isp) ||
              (os.user_isp != ns.user_isp)))
                drbd_send_state(mdev);

        /* In case one of the isp bits got set, suspend other devices. */
        if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
            (ns.aftr_isp || ns.peer_isp || ns.user_isp))
                suspend_other_sg(mdev);

        /* Make sure the peer gets informed about any state
           changes (ISP bits) while we were in WFReportParams. */
        if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
                drbd_send_state(mdev);

        /* We are in the process of starting a full sync... */
        if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
            (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
                drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");

        /* We are invalidating ourselves... */
        if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
            os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
                drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
        if (os.disk > D_FAILED && ns.disk == D_FAILED) {
                enum drbd_io_error_p eh;

                eh = EP_PASS_ON;
                if (get_ldev_if_state(mdev, D_FAILED)) {
                        eh = mdev->ldev->dc.on_io_error;
                        put_ldev(mdev);
                }

                drbd_rs_cancel_all(mdev);
                /* since get_ldev() only works as long as disk>=D_INCONSISTENT,
                   and it is D_DISKLESS here, local_cnt can only go down, it
                   cannot increase... It will reach zero */
                wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
                mdev->rs_total = 0;
                mdev->rs_failed = 0;
                atomic_set(&mdev->rs_pending_cnt, 0);

                spin_lock_irq(&mdev->req_lock);
                _drbd_set_state(_NS(mdev, disk, D_DISKLESS), CS_HARD, NULL);
                spin_unlock_irq(&mdev->req_lock);

                if (eh == EP_CALL_HELPER)
                        drbd_khelper(mdev, "local-io-error");
        }

        if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {

                if (os.disk == D_FAILED) /* && ns.disk == D_DISKLESS */ {
                        if (drbd_send_state(mdev))
                                dev_warn(DEV, "Notified peer that my disk is broken.\n");
                        else
                                dev_err(DEV, "Sending state in drbd_io_error() failed\n");
                }

                lc_destroy(mdev->resync);
                mdev->resync = NULL;
                lc_destroy(mdev->act_log);
                mdev->act_log = NULL;
                __no_warn(local,
                        drbd_free_bc(mdev->ldev);
                        mdev->ldev = NULL;);

                if (mdev->md_io_tmpp)
                        __free_page(mdev->md_io_tmpp);
        }

        /* Disks got bigger while they were detached */
        if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
            test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
                if (ns.conn == C_CONNECTED)
                        resync_after_online_grow(mdev);
        }

        /* A resync finished or aborted, wake paused devices... */
        if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
            (os.peer_isp && !ns.peer_isp) ||
            (os.user_isp && !ns.user_isp))
                resume_next_sg(mdev);

        /* Upon network connection, we need to start the receiver */
        if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
                drbd_thread_start(&mdev->receiver);

        /* Terminate worker thread if we are unconfigured - it will be
           restarted as needed... */
        if (ns.disk == D_DISKLESS &&
            ns.conn == C_STANDALONE &&
            ns.role == R_SECONDARY) {
                if (os.aftr_isp != ns.aftr_isp)
                        resume_next_sg(mdev);
                /* set in __drbd_set_state, unless CONFIG_PENDING was set */
                if (test_bit(DEVICE_DYING, &mdev->flags))
                        drbd_thread_stop_nowait(&mdev->worker);
        }

        drbd_md_sync(mdev);
}


static int drbd_thread_setup(void *arg)
{
        struct drbd_thread *thi = (struct drbd_thread *) arg;
        struct drbd_conf *mdev = thi->mdev;
        unsigned long flags;
        int retval;

restart:
        retval = thi->function(thi);

        spin_lock_irqsave(&thi->t_lock, flags);

        /* if the receiver has been "Exiting", the last thing it did
         * was set the conn state to "StandAlone",
         * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
         * and receiver thread will be "started".
         * drbd_thread_start needs to set "Restarting" in that case.
         * t_state check and assignment needs to be within the same spinlock,
         * so either thread_start sees Exiting, and can remap to Restarting,
         * or thread_start sees None, and can proceed as normal.
         */
1383
1384         if (thi->t_state == Restarting) {
1385                 dev_info(DEV, "Restarting %s\n", current->comm);
1386                 thi->t_state = Running;
1387                 spin_unlock_irqrestore(&thi->t_lock, flags);
1388                 goto restart;
1389         }
1390
1391         thi->task = NULL;
1392         thi->t_state = None;
1393         smp_mb();
1394         complete(&thi->stop);
1395         spin_unlock_irqrestore(&thi->t_lock, flags);
1396
1397         dev_info(DEV, "Terminating %s\n", current->comm);
1398
1399         /* Release the module reference taken when the thread was started */
1400         module_put(THIS_MODULE);
1401         return retval;
1402 }
1403
1404 static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
1405                       int (*func) (struct drbd_thread *))
1406 {
1407         spin_lock_init(&thi->t_lock);
1408         thi->task    = NULL;
1409         thi->t_state = None;
1410         thi->function = func;
1411         thi->mdev = mdev;
1412 }
1413
1414 int drbd_thread_start(struct drbd_thread *thi)
1415 {
1416         struct drbd_conf *mdev = thi->mdev;
1417         struct task_struct *nt;
1418         unsigned long flags;
1419
1420         const char *me =
1421                 thi == &mdev->receiver ? "receiver" :
1422                 thi == &mdev->asender  ? "asender"  :
1423                 thi == &mdev->worker   ? "worker"   : "NONSENSE";
1424
1425         /* may be called from the state engine doing drbd_thread_stop_nowait,
1426          * while holding the req lock irqsave */
1427         spin_lock_irqsave(&thi->t_lock, flags);
1428
1429         switch (thi->t_state) {
1430         case None:
1431                 dev_info(DEV, "Starting %s thread (from %s [%d])\n",
1432                                 me, current->comm, current->pid);
1433
1434                 /* Get ref on module for thread - this is released when thread exits */
1435                 if (!try_module_get(THIS_MODULE)) {
1436                         dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
1437                         spin_unlock_irqrestore(&thi->t_lock, flags);
1438                         return FALSE;
1439                 }
1440
1441                 init_completion(&thi->stop);
1442                 D_ASSERT(thi->task == NULL);
1443                 thi->reset_cpu_mask = 1;
1444                 thi->t_state = Running;
1445                 spin_unlock_irqrestore(&thi->t_lock, flags);
1446                 flush_signals(current); /* otherwise we may get -ERESTARTNOINTR */
1447
1448                 nt = kthread_create(drbd_thread_setup, (void *) thi,
1449                                     "drbd%d_%s", mdev_to_minor(mdev), me);
1450
1451                 if (IS_ERR(nt)) {
1452                         dev_err(DEV, "Couldn't start thread\n");
1453
1454                         module_put(THIS_MODULE);
1455                         return FALSE;
1456                 }
1457                 spin_lock_irqsave(&thi->t_lock, flags);
1458                 thi->task = nt;
1459                 thi->t_state = Running;
1460                 spin_unlock_irqrestore(&thi->t_lock, flags);
1461                 wake_up_process(nt);
1462                 break;
1463         case Exiting:
1464                 thi->t_state = Restarting;
1465                 dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
1466                                 me, current->comm, current->pid);
1467                 /* fall through */
1468         case Running:
1469         case Restarting:
1470         default:
1471                 spin_unlock_irqrestore(&thi->t_lock, flags);
1472                 break;
1473         }
1474
1475         return TRUE;
1476 }
1477
1478
1479 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
1480 {
1481         unsigned long flags;
1482
1483         enum drbd_thread_state ns = restart ? Restarting : Exiting;
1484
1485         /* may be called from state engine, holding the req lock irqsave */
1486         spin_lock_irqsave(&thi->t_lock, flags);
1487
1488         if (thi->t_state == None) {
1489                 spin_unlock_irqrestore(&thi->t_lock, flags);
1490                 if (restart)
1491                         drbd_thread_start(thi);
1492                 return;
1493         }
1494
1495         if (thi->t_state != ns) {
1496                 if (thi->task == NULL) {
1497                         spin_unlock_irqrestore(&thi->t_lock, flags);
1498                         return;
1499                 }
1500
1501                 thi->t_state = ns;
1502                 smp_mb();
1503                 init_completion(&thi->stop);
1504                 if (thi->task != current)
1505                         force_sig(DRBD_SIGKILL, thi->task);
1506
1507         }
1508
1509         spin_unlock_irqrestore(&thi->t_lock, flags);
1510
1511         if (wait)
1512                 wait_for_completion(&thi->stop);
1513 }
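
/*
 * Usage sketch (illustrative only): the three per-device threads are
 * started and stopped through this pair.  drbd_thread_stop() and
 * drbd_thread_stop_nowait() are assumed to be thin wrappers around
 * _drbd_thread_stop() with (restart, wait) = (0, 1) resp. (0, 0):
 *
 *	if (!drbd_thread_start(&mdev->worker))
 *		dev_err(DEV, "worker thread failed to start\n");
 *	...
 *	drbd_thread_stop(&mdev->worker);	(blocks on thi->stop)
 */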
1514
1515 #ifdef CONFIG_SMP
1516 /**
1517  * drbd_calc_cpu_mask() - Generate a CPU mask, spreading devices over all CPUs
1518  * @mdev:       DRBD device.
1519  *
1520  * Forces all threads of a device onto the same CPU, which benefits DRBD's
1521  * performance. May be overridden by the user's configuration.
1522  */
1523 void drbd_calc_cpu_mask(struct drbd_conf *mdev)
1524 {
1525         int ord, cpu;
1526
1527         /* user override. */
1528         if (cpumask_weight(mdev->cpu_mask))
1529                 return;
1530
1531         ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
1532         for_each_online_cpu(cpu) {
1533                 if (ord-- == 0) {
1534                         cpumask_set_cpu(cpu, mdev->cpu_mask);
1535                         return;
1536                 }
1537         }
1538         /* should not be reached */
1539         cpumask_setall(mdev->cpu_mask);
1540 }
1541
1542 /**
1543  * drbd_thread_current_set_cpu() - Modifies the cpu mask of the _current_ thread
1544  * @mdev:       DRBD device.
1545  *
1546  * Call this in the "main loop" of _all_ threads. No mutex is needed, since
1547  * current won't die prematurely.
1548  */
1549 void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
1550 {
1551         struct task_struct *p = current;
1552         struct drbd_thread *thi =
1553                 p == mdev->asender.task  ? &mdev->asender  :
1554                 p == mdev->receiver.task ? &mdev->receiver :
1555                 p == mdev->worker.task   ? &mdev->worker   :
1556                 NULL;
1557         ERR_IF(thi == NULL)
1558                 return;
1559         if (!thi->reset_cpu_mask)
1560                 return;
1561         thi->reset_cpu_mask = 0;
1562         set_cpus_allowed_ptr(p, mdev->cpu_mask);
1563 }
1564 #endif
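
/*
 * Call pattern sketch (assuming a thread main loop shaped like
 * drbd_worker()'s): invoke drbd_thread_current_set_cpu() once per
 * iteration; thanks to reset_cpu_mask, set in drbd_thread_start(), it
 * only migrates the task on the first pass after a (re)start:
 *
 *	while (get_t_state(thi) == Running) {
 *		drbd_thread_current_set_cpu(mdev);
 *		(wait for and process one work item)
 *	}
 */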
1565
1566 /* the appropriate socket mutex must be held already */
1567 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
1568                           enum drbd_packets cmd, struct p_header *h,
1569                           size_t size, unsigned msg_flags)
1570 {
1571         int sent, ok;
1572
1573         ERR_IF(!h) return FALSE;
1574         ERR_IF(!size) return FALSE;
1575
1576         h->magic   = BE_DRBD_MAGIC;
1577         h->command = cpu_to_be16(cmd);
1578         h->length  = cpu_to_be16(size-sizeof(struct p_header));
1579
1580         trace_drbd_packet(mdev, sock, 0, (void *)h, __FILE__, __LINE__);
1581         sent = drbd_send(mdev, sock, h, size, msg_flags);
1582
1583         ok = (sent == size);
1584         if (!ok)
1585                 dev_err(DEV, "short sent %s size=%d sent=%d\n",
1586                     cmdname(cmd), (int)size, sent);
1587         return ok;
1588 }
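
/*
 * For reference, the framing implied by the assignments above (struct
 * p_header lives in drbd_int.h; sketch of the on-wire layout):
 *
 *	u32 magic;	 BE_DRBD_MAGIC, stored big endian
 *	u16 command;	 cpu_to_be16(cmd)
 *	u16 length;	 cpu_to_be16(payload bytes, header excluded)
 *	payload follows directly after the header
 */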
1589
1590 /* Callers do not pass the socket themselves; we may only look at it
1591  * while we hold the appropriate socket mutex.
1592  */
1593 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
1594                   enum drbd_packets cmd, struct p_header *h, size_t size)
1595 {
1596         int ok = 0;
1597         struct socket *sock;
1598
1599         if (use_data_socket) {
1600                 mutex_lock(&mdev->data.mutex);
1601                 sock = mdev->data.socket;
1602         } else {
1603                 mutex_lock(&mdev->meta.mutex);
1604                 sock = mdev->meta.socket;
1605         }
1606
1607         /* drbd_disconnect() could have called drbd_free_sock()
1608          * while we were waiting in down()... */
1609         if (likely(sock != NULL))
1610                 ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);
1611
1612         if (use_data_socket)
1613                 mutex_unlock(&mdev->data.mutex);
1614         else
1615                 mutex_unlock(&mdev->meta.mutex);
1616         return ok;
1617 }
1618
1619 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
1620                    size_t size)
1621 {
1622         struct p_header h;
1623         int ok;
1624
1625         h.magic   = BE_DRBD_MAGIC;
1626         h.command = cpu_to_be16(cmd);
1627         h.length  = cpu_to_be16(size);
1628
1629         if (!drbd_get_data_sock(mdev))
1630                 return 0;
1631
1632         trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&h, __FILE__, __LINE__);
1633
1634         ok = (sizeof(h) ==
1635                 drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
1636         ok = ok && (size ==
1637                 drbd_send(mdev, mdev->data.socket, data, size, 0));
1638
1639         drbd_put_data_sock(mdev);
1640
1641         return ok;
1642 }
1643
1644 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
1645 {
1646         struct p_rs_param_89 *p;
1647         struct socket *sock;
1648         int size, rv;
1649         const int apv = mdev->agreed_pro_version;
1650
1651         size = apv <= 87 ? sizeof(struct p_rs_param)
1652                 : apv == 88 ? sizeof(struct p_rs_param)
1653                         + strlen(mdev->sync_conf.verify_alg) + 1
1654                 : /* 89 */    sizeof(struct p_rs_param_89);
1655
1656         /* used from both admin command context and receiver/worker context.
1657          * to avoid a kmalloc, grab the socket right here,
1658          * then use the pre-allocated send buffer (sbuf) there */
1659         mutex_lock(&mdev->data.mutex);
1660         sock = mdev->data.socket;
1661
1662         if (likely(sock != NULL)) {
1663                 enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
1664
1665                 p = &mdev->data.sbuf.rs_param_89;
1666
1667                 /* initialize verify_alg and csums_alg */
1668                 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
1669
1670                 p->rate = cpu_to_be32(sc->rate);
1671
1672                 if (apv >= 88)
1673                         strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
1674                 if (apv >= 89)
1675                         strcpy(p->csums_alg, mdev->sync_conf.csums_alg);
1676
1677                 rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
1678         } else
1679                 rv = 0; /* not ok */
1680
1681         mutex_unlock(&mdev->data.mutex);
1682
1683         return rv;
1684 }
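
/*
 * Summary of the size selection above, by agreed protocol version:
 *	apv <= 87:  fixed struct p_rs_param             (rate only)
 *	apv == 88:  p_rs_param + strlen(verify_alg) + 1 (one algorithm name)
 *	apv >= 89:  full struct p_rs_param_89           (verify_alg + csums_alg)
 */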
1685
1686 int drbd_send_protocol(struct drbd_conf *mdev)
1687 {
1688         struct p_protocol *p;
1689         int size, rv;
1690
1691         size = sizeof(struct p_protocol);
1692
1693         if (mdev->agreed_pro_version >= 87)
1694                 size += strlen(mdev->net_conf->integrity_alg) + 1;
1695
1696         /* we must not recurse into our own queue,
1697          * as that is blocked during handshake */
1698         p = kmalloc(size, GFP_NOIO);
1699         if (p == NULL)
1700                 return 0;
1701
1702         p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
1703         p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
1704         p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
1705         p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
1706         p->want_lose     = cpu_to_be32(mdev->net_conf->want_lose);
1707         p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);
1708
1709         if (mdev->agreed_pro_version >= 87)
1710                 strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
1711
1712         rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
1713                            (struct p_header *)p, size);
1714         kfree(p);
1715         return rv;
1716 }
1717
1718 int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
1719 {
1720         struct p_uuids p;
1721         int i;
1722
1723         if (!get_ldev_if_state(mdev, D_NEGOTIATING))
1724                 return 1;
1725
1726         for (i = UI_CURRENT; i < UI_SIZE; i++)
1727                 p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;
1728
1729         mdev->comm_bm_set = drbd_bm_total_weight(mdev);
1730         p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
1731         uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
1732         uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
1733         uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
1734         p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
1735
1736         put_ldev(mdev);
1737
1738         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
1739                              (struct p_header *)&p, sizeof(p));
1740 }
1741
1742 int drbd_send_uuids(struct drbd_conf *mdev)
1743 {
1744         return _drbd_send_uuids(mdev, 0);
1745 }
1746
1747 int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
1748 {
1749         return _drbd_send_uuids(mdev, 8);
1750 }
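
/*
 * Meaning of the uuid_flags bits assembled in _drbd_send_uuids() above
 * (the driver passes the literals; these labels are illustrative only):
 *
 *	1  net_conf->want_lose is set ("discard my data")
 *	2  CRASHED_PRIMARY flag is set
 *	4  new_state_tmp.disk == D_INCONSISTENT
 *	8  skip initial sync, see drbd_send_uuids_skip_initial_sync()
 */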
1751
1752
1753 int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
1754 {
1755         struct p_rs_uuid p;
1756
1757         p.uuid = cpu_to_be64(val);
1758
1759         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
1760                              (struct p_header *)&p, sizeof(p));
1761 }
1762
1763 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply)
1764 {
1765         struct p_sizes p;
1766         sector_t d_size, u_size;
1767         int q_order_type;
1768         int ok;
1769
1770         if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
1771                 D_ASSERT(mdev->ldev->backing_bdev);
1772                 d_size = drbd_get_max_capacity(mdev->ldev);
1773                 u_size = mdev->ldev->dc.disk_size;
1774                 q_order_type = drbd_queue_order_type(mdev);
1776                 put_ldev(mdev);
1777         } else {
1778                 d_size = 0;
1779                 u_size = 0;
1780                 q_order_type = QUEUE_ORDERED_NONE;
1781         }
1782
1783         p.d_size = cpu_to_be64(d_size);
1784         p.u_size = cpu_to_be64(u_size);
1785         p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
1786         p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
1787         p.queue_order_type = cpu_to_be32(q_order_type);
1788
1789         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
1790                            (struct p_header *)&p, sizeof(p));
1791         return ok;
1792 }
1793
1794 /**
1795  * drbd_send_state() - Sends the drbd state to the peer
1796  * @mdev:       DRBD device.
1797  */
1798 int drbd_send_state(struct drbd_conf *mdev)
1799 {
1800         struct socket *sock;
1801         struct p_state p;
1802         int ok = 0;
1803
1804         /* Grab state lock so we won't send state if we're in the middle
1805          * of a cluster wide state change on another thread */
1806         drbd_state_lock(mdev);
1807
1808         mutex_lock(&mdev->data.mutex);
1809
1810         p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
1811         sock = mdev->data.socket;
1812
1813         if (likely(sock != NULL)) {
1814                 ok = _drbd_send_cmd(mdev, sock, P_STATE,
1815                                     (struct p_header *)&p, sizeof(p), 0);
1816         }
1817
1818         mutex_unlock(&mdev->data.mutex);
1819
1820         drbd_state_unlock(mdev);
1821         return ok;
1822 }
1823
1824 int drbd_send_state_req(struct drbd_conf *mdev,
1825         union drbd_state mask, union drbd_state val)
1826 {
1827         struct p_req_state p;
1828
1829         p.mask    = cpu_to_be32(mask.i);
1830         p.val     = cpu_to_be32(val.i);
1831
1832         return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
1833                              (struct p_header *)&p, sizeof(p));
1834 }
1835
1836 int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
1837 {
1838         struct p_req_state_reply p;
1839
1840         p.retcode    = cpu_to_be32(retcode);
1841
1842         return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
1843                              (struct p_header *)&p, sizeof(p));
1844 }
1845
1846 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
1847         struct p_compressed_bm *p,
1848         struct bm_xfer_ctx *c)
1849 {
1850         struct bitstream bs;
1851         unsigned long plain_bits;
1852         unsigned long tmp;
1853         unsigned long rl;
1854         unsigned len;
1855         unsigned toggle;
1856         int bits;
1857
1858         /* may we use this feature? */
1859         if ((mdev->sync_conf.use_rle == 0) ||
1860                 (mdev->agreed_pro_version < 90))
1861                         return 0;
1862
1863         if (c->bit_offset >= c->bm_bits)
1864                 return 0; /* nothing to do. */
1865
1866         /* use at most this many bytes */
1867         bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
1868         memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
1869         /* plain bits covered in this code string */
1870         plain_bits = 0;
1871
1872         /* p->encoding & 0x80 stores whether the first run length is set.
1873          * bit offset is implicit.
1874          * start with toggle == 2 so we can recognize the first iteration */
1875         toggle = 2;
1876
1877         /* see how many plain bits we can stuff into one packet
1878          * using RLE and VLI. */
1879         do {
1880                 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
1881                                     : _drbd_bm_find_next(mdev, c->bit_offset);
1882                 if (tmp == -1UL)
1883                         tmp = c->bm_bits;
1884                 rl = tmp - c->bit_offset;
1885
1886                 if (toggle == 2) { /* first iteration */
1887                         if (rl == 0) {
1888                                 /* the first checked bit was set,
1889                                  * store start value, */
1890                                 DCBP_set_start(p, 1);
1891                                 /* but skip encoding of zero run length */
1892                                 toggle = !toggle;
1893                                 continue;
1894                         }
1895                         DCBP_set_start(p, 0);
1896                 }
1897
1898                 /* paranoia: catch zero runlength.
1899                  * can only happen if bitmap is modified while we scan it. */
1900                 if (rl == 0) {
1901                         dev_err(DEV, "unexpected zero runlength while encoding bitmap "
1902                             "t:%u bo:%lu\n", toggle, c->bit_offset);
1903                         return -1;
1904                 }
1905
1906                 bits = vli_encode_bits(&bs, rl);
1907                 if (bits == -ENOBUFS) /* buffer full */
1908                         break;
1909                 if (bits <= 0) {
1910                         dev_err(DEV, "error while encoding bitmap: %d\n", bits);
1911                         return 0;
1912                 }
1913
1914                 toggle = !toggle;
1915                 plain_bits += rl;
1916                 c->bit_offset = tmp;
1917         } while (c->bit_offset < c->bm_bits);
1918
1919         len = bs.cur.b - p->code + !!bs.cur.bit;
1920
1921         if (plain_bits < (len << 3)) {
1922                 /* incompressible with this method.
1923                  * we need to rewind both word and bit position. */
1924                 c->bit_offset -= plain_bits;
1925                 bm_xfer_ctx_bit_to_word_offset(c);
1926                 c->bit_offset = c->word_offset * BITS_PER_LONG;
1927                 return 0;
1928         }
1929
1930         /* RLE + VLI was able to compress it just fine.
1931          * update c->word_offset. */
1932         bm_xfer_ctx_bit_to_word_offset(c);
1933
1934         /* store pad_bits */
1935         DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
1936
1937         return len;
1938 }
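
/*
 * Worked example of the encoding above (illustrative): for a bitmap
 * beginning 0,0,1,1,1,... the scan yields a run of 2 cleared bits, then
 * 3 set bits, and so on.  The first run is of cleared bits, so
 * DCBP_set_start(p, 0) is recorded and the run lengths 2, 3, ... are
 * VLI-encoded into p->code.  Had the first bit been set, start would be 1
 * and the empty leading zero-run would be skipped (the toggle == 2 branch).
 * The result is kept only if it beats plain encoding, i.e. plain_bits >= len * 8.
 */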
1939
1940 enum { OK, FAILED, DONE }
1941 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
1942         struct p_header *h, struct bm_xfer_ctx *c)
1943 {
1944         struct p_compressed_bm *p = (void*)h;
1945         unsigned long num_words;
1946         int len;
1947         int ok;
1948
1949         len = fill_bitmap_rle_bits(mdev, p, c);
1950
1951         if (len < 0)
1952                 return FAILED;
1953
1954         if (len) {
1955                 DCBP_set_code(p, RLE_VLI_Bits);
1956                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
1957                         sizeof(*p) + len, 0);
1958
1959                 c->packets[0]++;
1960                 c->bytes[0] += sizeof(*p) + len;
1961
1962                 if (c->bit_offset >= c->bm_bits)
1963                         len = 0; /* DONE */
1964         } else {
1965                 /* was not compressible.
1966                  * send a buffer full of plain text bits instead. */
1967                 num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
1968                 len = num_words * sizeof(long);
1969                 if (len)
1970                         drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
1971                 ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
1972                                    h, sizeof(struct p_header) + len, 0);
1973                 c->word_offset += num_words;
1974                 c->bit_offset = c->word_offset * BITS_PER_LONG;
1975
1976                 c->packets[1]++;
1977                 c->bytes[1] += sizeof(struct p_header) + len;
1978
1979                 if (c->bit_offset > c->bm_bits)
1980                         c->bit_offset = c->bm_bits;
1981         }
1982         ok = ok ? ((len == 0) ? DONE : OK) : FAILED;
1983
1984         if (ok == DONE)
1985                 INFO_bm_xfer_stats(mdev, "send", c);
1986         return ok;
1987 }
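
/*
 * Return-value protocol of the helper above, as consumed by
 * _drbd_send_bitmap() below: OK means "packet sent, more bits pending",
 * DONE means "packet sent, transfer complete", FAILED aborts the loop
 * (send error, or fill_bitmap_rle_bits() saw a concurrent bitmap change).
 */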
1988
1989 /* See the comment at receive_bitmap() */
1990 int _drbd_send_bitmap(struct drbd_conf *mdev)
1991 {
1992         struct bm_xfer_ctx c;
1993         struct p_header *p;
1994         int ret;
1995
1996         ERR_IF(!mdev->bitmap) return FALSE;
1997
1998         /* maybe we should use some per thread scratch page,
1999          * and allocate that during initial device creation? */
2000         p = (struct p_header *) __get_free_page(GFP_NOIO);
2001         if (!p) {
2002                 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
2003                 return FALSE;
2004         }
2005
2006         if (get_ldev(mdev)) {
2007                 if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
2008                         dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
2009                         drbd_bm_set_all(mdev);
2010                         if (drbd_bm_write(mdev)) {
2011                                 /* write_bm did fail! Leave the full sync flag set
2012                                  * in the meta data, but otherwise proceed as normal -
2013                                  * need to tell the other side that a full resync is required! */
2014                                 dev_err(DEV, "Failed to write bitmap to disk!\n");
2015                         } else {
2016                                 drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
2017                                 drbd_md_sync(mdev);
2018                         }
2019                 }
2020                 put_ldev(mdev);
2021         }
2022
2023         c = (struct bm_xfer_ctx) {
2024                 .bm_bits = drbd_bm_bits(mdev),
2025                 .bm_words = drbd_bm_words(mdev),
2026         };
2027
2028         do {
2029                 ret = send_bitmap_rle_or_plain(mdev, p, &c);
2030         } while (ret == OK);
2031
2032         free_page((unsigned long) p);
2033         return (ret == DONE);
2034 }
2035
2036 int drbd_send_bitmap(struct drbd_conf *mdev)
2037 {
2038         int err;
2039
2040         if (!drbd_get_data_sock(mdev))
2041                 return -1;
2042         err = !_drbd_send_bitmap(mdev);
2043         drbd_put_data_sock(mdev);
2044         return err;
2045 }
2046
2047 int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
2048 {
2049         int ok;
2050         struct p_barrier_ack p;
2051
2052         p.barrier  = barrier_nr;
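        /* presumably no byte order conversion needed: the barrier number is an
         * opaque cookie, echoed back to the peer exactly as received */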
2053         p.set_size = cpu_to_be32(set_size);
2054
2055         if (mdev->state.conn < C_CONNECTED)
2056                 return FALSE;
2057         ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
2058                         (struct p_header *)&p, sizeof(p));
2059         return ok;
2060 }
2061
2062 /**
2063  * _drbd_send_ack() - Sends an ack packet
2064  * @mdev:       DRBD device.
2065  * @cmd:        Packet command code.
2066  * @sector:     sector, needs to be in big endian byte order
2067  * @blksize:    size in byte, needs to be in big endian byte order
2068  * @block_id:   Id, big endian byte order
2069  */
2070 static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
2071                           u64 sector,
2072                           u32 blksize,
2073                           u64 block_id)
2074 {
2075         int ok;
2076         struct p_block_ack p;
2077
2078         p.sector   = sector;
2079         p.block_id = block_id;
2080         p.blksize  = blksize;
2081         p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
2082
2083         if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
2084                 return FALSE;
2085         ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
2086                                 (struct p_header *)&p, sizeof(p));
2087         return ok;
2088 }
2089
2090 int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
2091                      struct p_data *dp)
2092 {
2093         const int header_size = sizeof(struct p_data)
2094                               - sizeof(struct p_header);
2095         int data_size  = ((struct p_header *)dp)->length - header_size;
2096
2097         return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
2098                               dp->block_id);
2099 }
2100
2101 int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
2102                      struct p_block_req *rp)
2103 {
2104         return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
2105 }
2106
2107 /**
2108  * drbd_send_ack() - Sends an ack packet
2109  * @mdev:       DRBD device.
2110  * @cmd:        Packet command code.
2111  * @e:          Epoch entry.
2112  */
2113 int drbd_send_ack(struct drbd_conf *mdev,
2114         enum drbd_packets cmd, struct drbd_epoch_entry *e)
2115 {
2116         return _drbd_send_ack(mdev, cmd,
2117                               cpu_to_be64(e->sector),
2118                               cpu_to_be32(e->size),
2119                               e->block_id);
2120 }
2121
2122 /* This function misuses the block_id field to signal whether the blocks
2123  * are in sync or not. */
2124 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
2125                      sector_t sector, int blksize, u64 block_id)
2126 {
2127         return _drbd_send_ack(mdev, cmd,
2128                               cpu_to_be64(sector),
2129                               cpu_to_be32(blksize),
2130                               cpu_to_be64(block_id));
2131 }
2132
2133 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
2134                        sector_t sector, int size, u64 block_id)
2135 {
2136         int ok;
2137         struct p_block_req p;
2138
2139         p.sector   = cpu_to_be64(sector);
2140         p.block_id = block_id;
2141         p.blksize  = cpu_to_be32(size);
2142
2143         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
2144                                 (struct p_header *)&p, sizeof(p));
2145         return ok;
2146 }
2147
2148 int drbd_send_drequest_csum(struct drbd_conf *mdev,
2149                             sector_t sector, int size,
2150                             void *digest, int digest_size,
2151                             enum drbd_packets cmd)
2152 {
2153         int ok;
2154         struct p_block_req p;
2155
2156         p.sector   = cpu_to_be64(sector);
2157         p.block_id = BE_DRBD_MAGIC + 0xbeef;
2158         p.blksize  = cpu_to_be32(size);
2159
2160         p.head.magic   = BE_DRBD_MAGIC;
2161         p.head.command = cpu_to_be16(cmd);
2162         p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size);
2163
2164         mutex_lock(&mdev->data.mutex);
2165
2166         ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
2167         ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));
2168
2169         mutex_unlock(&mdev->data.mutex);
2170
2171         return ok;
2172 }
2173
2174 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
2175 {
2176         int ok;
2177         struct p_block_req p;
2178
2179         p.sector   = cpu_to_be64(sector);
2180         p.block_id = BE_DRBD_MAGIC + 0xbabe;
2181         p.blksize  = cpu_to_be32(size);
2182
2183         ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
2184                            (struct p_header *)&p, sizeof(p));
2185         return ok;
2186 }
2187
2188 /* called on sndtimeo
2189  * returns FALSE if we should retry,
2190  * TRUE if we think the connection is dead
2191  */
2192 static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
2193 {
2194         int drop_it;
2195         /* long elapsed = (long)(jiffies - mdev->last_received); */
2196
2197         drop_it =   mdev->meta.socket == sock
2198                 || !mdev->asender.task
2199                 || get_t_state(&mdev->asender) != Running
2200                 || mdev->state.conn < C_CONNECTED;
2201
2202         if (drop_it)
2203                 return TRUE;
2204
2205         drop_it = !--mdev->ko_count;
2206         if (!drop_it) {
2207                 dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
2208                        current->comm, current->pid, mdev->ko_count);
2209                 request_ping(mdev);
2210         }
2211
2212         return drop_it; /* && (mdev->state == R_PRIMARY) */
2213 }
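
/*
 * Net effect of ko_count (sketch): ko_count is re-armed on every send on
 * the data socket (see drbd_send()) and decremented here once per send
 * timeout expiry.  Assuming, say, a 6 second send timeout and ko-count = 7,
 * a completely stalled peer is declared dead after roughly 7 * 6 = 42
 * seconds, with a ping requested on each intermediate expiry.
 */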
2214
2215 /* The idea of sendpage seems to be to put some kind of reference
2216  * to the page into the skb, and to hand it over to the NIC. In
2217  * this process get_page() gets called.
2218  *
2219  * As soon as the page was really sent over the network put_page()
2220  * gets called by some part of the network layer. [ NIC driver? ]
2221  *
2222  * [ get_page() / put_page() increment/decrement the count. If count
2223  *   reaches 0 the page will be freed. ]
2224  *
2225  * This works nicely with pages from FSs.
2226  * But this means that in protocol A we might signal IO completion too early!
2227  *
2228  * In order not to corrupt data during a resync we must make sure
2229  * that we do not reuse our own buffer pages (EEs) too early; therefore
2230  * we have the net_ee list.
2231  *
2232  * XFS still seems to have problems: it submits pages with page_count == 0!
2233  * As a workaround, we disable sendpage on pages
2234  * with page_count == 0 or PageSlab.
2235  */
2236 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
2237                    int offset, size_t size)
2238 {
2239         int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, 0);
2240         kunmap(page);
2241         if (sent == size)
2242                 mdev->send_cnt += size>>9;
2243         return sent == size;
2244 }
2245
2246 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
2247                     int offset, size_t size)
2248 {
2249         mm_segment_t oldfs = get_fs();
2250         int sent, ok;
2251         int len = size;
2252
2253         /* e.g. XFS meta- & log-data is in slab pages, which have a
2254          * page_count of 0 and/or have PageSlab() set.
2255          * we cannot use send_page for those, as that does get_page();
2256          * put_page(); and would cause either a VM_BUG directly, or
2257          * __page_cache_release a page that would actually still be referenced
2258          * by someone, leading to some obscure delayed Oops somewhere else. */
2259         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
2260                 return _drbd_no_send_page(mdev, page, offset, size);
2261
2262         drbd_update_congested(mdev);
2263         set_fs(KERNEL_DS);
2264         do {
2265                 sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
2266                                                         offset, len,
2267                                                         MSG_NOSIGNAL);
2268                 if (sent == -EAGAIN) {
2269                         if (we_should_drop_the_connection(mdev,
2270                                                           mdev->data.socket))
2271                                 break;
2272                         else
2273                                 continue;
2274                 }
2275                 if (sent <= 0) {
2276                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
2277                              __func__, (int)size, len, sent);
2278                         break;
2279                 }
2280                 len    -= sent;
2281                 offset += sent;
2282         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
2283         set_fs(oldfs);
2284         clear_bit(NET_CONGESTED, &mdev->flags);
2285
2286         ok = (len == 0);
2287         if (likely(ok))
2288                 mdev->send_cnt += size>>9;
2289         return ok;
2290 }
2291
2292 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
2293 {
2294         struct bio_vec *bvec;
2295         int i;
2296         __bio_for_each_segment(bvec, bio, i, 0) {
2297                 if (!_drbd_no_send_page(mdev, bvec->bv_page,
2298                                      bvec->bv_offset, bvec->bv_len))
2299                         return 0;
2300         }
2301         return 1;
2302 }
2303
2304 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
2305 {
2306         struct bio_vec *bvec;
2307         int i;
2308         __bio_for_each_segment(bvec, bio, i, 0) {
2309                 if (!_drbd_send_page(mdev, bvec->bv_page,
2310                                      bvec->bv_offset, bvec->bv_len))
2311                         return 0;
2312         }
2313
2314         return 1;
2315 }
2316
2317 /* Used to send write requests
2318  * R_PRIMARY -> Peer    (P_DATA)
2319  */
2320 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
2321 {
2322         int ok = 1;
2323         struct p_data p;
2324         unsigned int dp_flags = 0;
2325         void *dgb;
2326         int dgs;
2327
2328         if (!drbd_get_data_sock(mdev))
2329                 return 0;
2330
2331         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2332                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2333
2334         p.head.magic   = BE_DRBD_MAGIC;
2335         p.head.command = cpu_to_be16(P_DATA);
2336         p.head.length  =
2337                 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size);
2338
2339         p.sector   = cpu_to_be64(req->sector);
2340         p.block_id = (unsigned long)req;
2341         p.seq_num  = cpu_to_be32(req->seq_num =
2342                                  atomic_add_return(1, &mdev->packet_seq));
2343         dp_flags = 0;
2344
2345         /* NOTE: no need to check whether barriers are supported here, as we
2346          *       would not have passed the test in make_request_common otherwise
2347          */
2348         if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) {
2349                 dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
2350                 /* dp_flags |= DP_HARDBARRIER; */
2351         }
2352         if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO))
2353                 dp_flags |= DP_RW_SYNC;
2354         /* for now handle SYNCIO and UNPLUG
2355          * as if they still were one and the same flag */
2356         if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG))
2357                 dp_flags |= DP_RW_SYNC;
2358         if (mdev->state.conn >= C_SYNC_SOURCE &&
2359             mdev->state.conn <= C_PAUSED_SYNC_T)
2360                 dp_flags |= DP_MAY_SET_IN_SYNC;
2361
2362         p.dp_flags = cpu_to_be32(dp_flags);
2363         trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
2364         set_bit(UNPLUG_REMOTE, &mdev->flags);
2365         ok = (sizeof(p) ==
2366                 drbd_send(mdev, mdev->data.socket, &p, sizeof(p), MSG_MORE));
2367         if (ok && dgs) {
2368                 dgb = mdev->int_dig_out;
2369                 drbd_csum(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
2370                 ok = (dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE));
2371         }
2372         if (ok) {
2373                 if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
2374                         ok = _drbd_send_bio(mdev, req->master_bio);
2375                 else
2376                         ok = _drbd_send_zc_bio(mdev, req->master_bio);
2377         }
2378
2379         drbd_put_data_sock(mdev);
2380         return ok;
2381 }
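
/*
 * On-the-wire layout of one P_DATA packet as assembled above (sketch):
 *
 *	struct p_header		magic, P_DATA, length = tail + dgs + req->size
 *	p_data tail		sector, block_id, seq_num, dp_flags
 *	dgs digest bytes	only if an integrity transform is configured
 *	req->size data bytes	the write payload, sent bio-segment-wise
 */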
2382
2383 /* answer packet, used to send data back for read requests:
2384  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
2385  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
2386  */
2387 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
2388                     struct drbd_epoch_entry *e)
2389 {
2390         int ok;
2391         struct p_data p;
2392         void *dgb;
2393         int dgs;
2394
2395         dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
2396                 crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
2397
2398         p.head.magic   = BE_DRBD_MAGIC;
2399         p.head.command = cpu_to_be16(cmd);
2400         p.head.length  =
2401                 cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size);
2402
2403         p.sector   = cpu_to_be64(e->sector);
2404         p.block_id = e->block_id;
2405         /* p.seq_num  = 0;    No sequence numbers here.. */
2406
2407         /* Only called by our kernel thread.
2408          * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
2409          * in response to admin command or module unload.
2410          */
2411         if (!drbd_get_data_sock(mdev))
2412                 return 0;
2413
2414         trace_drbd_packet(mdev, mdev->data.socket, 0, (void *)&p, __FILE__, __LINE__);
2415         ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
2416                                         sizeof(p), MSG_MORE);
2417         if (ok && dgs) {
2418                 dgb = mdev->int_dig_out;
2419                 drbd_csum(mdev, mdev->integrity_w_tfm, e->private_bio, dgb);
2420                 ok = (dgs == drbd_send(mdev, mdev->data.socket, dgb, dgs, MSG_MORE));
2421         }
2422         if (ok)
2423                 ok = _drbd_send_zc_bio(mdev, e->private_bio);
2424
2425         drbd_put_data_sock(mdev);
2426         return ok;
2427 }
2428
2429 /*
2430   drbd_send distinguishes two cases:
2431
2432   Packets sent via the data socket "sock"
2433   and packets sent via the meta data socket "msock"
2434
2435                     sock                      msock
2436   -----------------+-------------------------+------------------------------
2437   timeout           conf.timeout / 2          conf.timeout / 2
2438   timeout action    send a ping via msock     Abort communication
2439                                               and close all sockets
2440 */
2441
2442 /*
2443  * you must hold the appropriate socket mutex (data.mutex or meta.mutex)!
2444  */
2445 int drbd_send(struct drbd_conf *mdev, struct socket *sock,
2446               void *buf, size_t size, unsigned msg_flags)
2447 {
2448         struct kvec iov;
2449         struct msghdr msg;
2450         int rv, sent = 0;
2451
2452         if (!sock)
2453                 return -1000;
2454
2455         /* THINK  if (signal_pending) return ... ? */
2456
2457         iov.iov_base = buf;
2458         iov.iov_len  = size;
2459
2460         msg.msg_name       = NULL;
2461         msg.msg_namelen    = 0;
2462         msg.msg_control    = NULL;
2463         msg.msg_controllen = 0;
2464         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
2465
2466         if (sock == mdev->data.socket) {
2467                 mdev->ko_count = mdev->net_conf->ko_count;
2468                 drbd_update_congested(mdev);
2469         }
2470         do {
2471                 /* STRANGE
2472                  * tcp_sendmsg does _not_ use its size parameter at all ?
2473                  *
2474                  * -EAGAIN on timeout, -EINTR on signal.
2475                  */
2476 /* THINK
2477  * do we need to block DRBD_SIG if sock == &meta.socket ??
2478  * otherwise wake_asender() might interrupt some send_*Ack !
2479  */
2480                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
2481                 if (rv == -EAGAIN) {
2482                         if (we_should_drop_the_connection(mdev, sock))
2483                                 break;
2484                         else
2485                                 continue;
2486                 }
2487                 D_ASSERT(rv != 0);
2488                 if (rv == -EINTR) {
2489                         flush_signals(current);
2490                         rv = 0;
2491                 }
2492                 if (rv < 0)
2493                         break;
2494                 sent += rv;
2495                 iov.iov_base += rv;
2496                 iov.iov_len  -= rv;
2497         } while (sent < size);
2498
2499         if (sock == mdev->data.socket)
2500                 clear_bit(NET_CONGESTED, &mdev->flags);
2501
2502         if (rv <= 0) {
2503                 if (rv != -EAGAIN) {
2504                         dev_err(DEV, "%s_sendmsg returned %d\n",
2505                             sock == mdev->meta.socket ? "msock" : "sock",
2506                             rv);
2507                         drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
2508                 } else
2509                         drbd_force_state(mdev, NS(conn, C_TIMEOUT));
2510         }
2511
2512         return sent;
2513 }
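
/*
 * Caller sketch (illustrative): every sender follows the same pattern -
 * take the socket mutex (directly, or via drbd_get_data_sock()), check
 * that the socket still exists, then stream header and payload:
 *
 *	mutex_lock(&mdev->data.mutex);
 *	if (mdev->data.socket)
 *		sent = drbd_send(mdev, mdev->data.socket, buf, size, 0);
 *	mutex_unlock(&mdev->data.mutex);
 */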
2514
2515 static int drbd_open(struct block_device *bdev, fmode_t mode)
2516 {
2517         struct drbd_conf *mdev = bdev->bd_disk->private_data;
2518         unsigned long flags;
2519         int rv = 0;
2520
2521         spin_lock_irqsave(&mdev->req_lock, flags);
2522         /* to have a stable mdev->state.role
2523          * and no race with updating open_cnt */
2524
2525         if (mdev->state.role != R_PRIMARY) {
2526                 if (mode & FMODE_WRITE)
2527                         rv = -EROFS;
2528                 else if (!allow_oos)
2529                         rv = -EMEDIUMTYPE;
2530         }
2531
2532         if (!rv)
2533                 mdev->open_cnt++;
2534         spin_unlock_irqrestore(&mdev->req_lock, flags);
2535
2536         return rv;
2537 }
2538
2539 static int drbd_release(struct gendisk *gd, fmode_t mode)
2540 {
2541         struct drbd_conf *mdev = gd->private_data;
2542         mdev->open_cnt--;
2543         return 0;
2544 }
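
/*
 * These two are hooked up through the disk's block_device_operations
 * (defined elsewhere in this file); roughly:
 *
 *	static struct block_device_operations drbd_ops = {
 *		.owner   = THIS_MODULE,
 *		.open    = drbd_open,
 *		.release = drbd_release,
 *	};
 */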
2545
2546 static void drbd_unplug_fn(struct request_queue *q)
2547 {
2548         struct drbd_conf *mdev = q->queuedata;
2549
2550         trace_drbd_unplug(mdev, "got unplugged");
2551
2552         /* unplug FIRST */
2553         spin_lock_irq(q->queue_lock);
2554         blk_remove_plug(q);
2555         spin_unlock_irq(q->queue_lock);
2556
2557         /* only if connected */
2558         spin_lock_irq(&mdev->req_lock);
2559         if (mdev->state.pdsk >= D_INCONSISTENT && mdev->state.conn >= C_CONNECTED) {
2560                 D_ASSERT(mdev->state.role == R_PRIMARY);
2561                 if (test_and_clear_bit(UNPLUG_REMOTE, &mdev->flags)) {
2562                         /* add to the data.work queue,
2563                          * unless already queued.
2564                          * XXX this might be a good addition to drbd_queue_work
2565                          * anyways, to detect "double queuing" ... */
2566                         if (list_empty(&mdev->unplug_work.list))
2567                                 drbd_queue_work(&mdev->data.work,
2568                                                 &mdev->unplug_work);
2569                 }
2570         }
2571         spin_unlock_irq(&mdev->req_lock);
2572
2573         if (mdev->state.disk >= D_INCONSISTENT)
2574                 drbd_kick_lo(mdev);
2575 }
2576
2577 static void drbd_set_defaults(struct drbd_conf *mdev)
2578 {
2579         mdev->sync_conf.after      = DRBD_AFTER_DEF;
2580         mdev->sync_conf.rate       = DRBD_RATE_DEF;
2581         mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF;
2582         mdev->state = (union drbd_state) {
2583                 { .role = R_SECONDARY,
2584                   .peer = R_UNKNOWN,
2585                   .conn = C_STANDALONE,
2586                   .disk = D_DISKLESS,
2587                   .pdsk = D_UNKNOWN,
2588                   .susp = 0
2589                 } };
2590 }
2591
2592 void drbd_init_set_defaults(struct drbd_conf *mdev)
2593 {
2594         /* the memset(,0,) did most of this.
2595          * note: only assignments, no allocation in here */
2596
2597         drbd_set_defaults(mdev);
2598
2599         /* for now, we do NOT yet support barriers,
2600          * even though we start to put some framework
2601          * in place to eventually support them */
2602         set_bit(NO_BARRIER_SUPP, &mdev->flags);
2603
2604         atomic_set(&mdev->ap_bio_cnt, 0);
2605         atomic_set(&mdev->ap_pending_cnt, 0);
2606         atomic_set(&mdev->rs_pending_cnt, 0);
2607         atomic_set(&mdev->unacked_cnt, 0);
2608         atomic_set(&mdev->local_cnt, 0);
2609         atomic_set(&mdev->net_cnt, 0);
2610         atomic_set(&mdev->packet_seq, 0);
2611         atomic_set(&mdev->pp_in_use, 0);
2612
2613         mutex_init(&mdev->md_io_mutex);
2614         mutex_init(&mdev->data.mutex);
2615         mutex_init(&mdev->meta.mutex);
2616         sema_init(&mdev->data.work.s, 0);
2617         sema_init(&mdev->meta.work.s, 0);
2618         mutex_init(&mdev->state_mutex);
2619
2620         spin_lock_init(&mdev->data.work.q_lock);
2621         spin_lock_init(&mdev->meta.work.q_lock);
2622
2623         spin_lock_init(&mdev->al_lock);
2624         spin_lock_init(&mdev->req_lock);
2625         spin_lock_init(&mdev->peer_seq_lock);
2626         spin_lock_init(&mdev->epoch_lock);
2627
2628         INIT_LIST_HEAD(&mdev->active_ee);
2629         INIT_LIST_HEAD(&mdev->sync_ee);
2630         INIT_LIST_HEAD(&mdev->done_ee);
2631         INIT_LIST_HEAD(&mdev->read_ee);
2632         INIT_LIST_HEAD(&mdev->net_ee);
2633         INIT_LIST_HEAD(&mdev->resync_reads);
2634         INIT_LIST_HEAD(&mdev->data.work.q);
2635         INIT_LIST_HEAD(&mdev->meta.work.q);
2636         INIT_LIST_HEAD(&mdev->resync_work.list);
2637         INIT_LIST_HEAD(&mdev->unplug_work.list);
2638         INIT_LIST_HEAD(&mdev->md_sync_work.list);
2639         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2640         mdev->resync_work.cb  = w_resync_inactive;
2641         mdev->unplug_work.cb  = w_send_write_hint;
2642         mdev->md_sync_work.cb = w_md_sync;
2643         mdev->bm_io_work.w.cb = w_bitmap_io;
2644         init_timer(&mdev->resync_timer);
2645         init_timer(&mdev->md_sync_timer);
2646         mdev->resync_timer.function = resync_timer_fn;
2647         mdev->resync_timer.data = (unsigned long) mdev;
2648         mdev->md_sync_timer.function = md_sync_timer_fn;
2649         mdev->md_sync_timer.data = (unsigned long) mdev;
2650
2651         init_waitqueue_head(&mdev->misc_wait);
2652         init_waitqueue_head(&mdev->state_wait);
2653         init_waitqueue_head(&mdev->ee_wait);
2654         init_waitqueue_head(&mdev->al_wait);
2655         init_waitqueue_head(&mdev->seq_wait);
2656
2657         drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
2658         drbd_thread_init(mdev, &mdev->worker, drbd_worker);
2659         drbd_thread_init(mdev, &mdev->asender, drbd_asender);
2660
2661         mdev->agreed_pro_version = PRO_VERSION_MAX;
2662         mdev->write_ordering = WO_bio_barrier;
2663         mdev->resync_wenr = LC_FREE;
2664 }
2665
2666 void drbd_mdev_cleanup(struct drbd_conf *mdev)
2667 {
2668         if (mdev->receiver.t_state != None)
2669                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2670                                 mdev->receiver.t_state);
2671
2672         /* no need to lock it, I'm the only thread alive */
2673         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
2674                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2675         mdev->al_writ_cnt  =
2676         mdev->bm_writ_cnt  =
2677         mdev->read_cnt     =
2678         mdev->recv_cnt     =
2679         mdev->send_cnt     =
2680         mdev->writ_cnt     =
2681         mdev->p_size       =
2682         mdev->rs_start     =
2683         mdev->rs_total     =
2684         mdev->rs_failed    =
2685         mdev->rs_mark_left =
2686         mdev->rs_mark_time = 0;
2687         D_ASSERT(mdev->net_conf == NULL);
2688
2689         drbd_set_my_capacity(mdev, 0);
2690         if (mdev->bitmap) {
2691                 /* maybe never allocated. */
2692                 drbd_bm_resize(mdev, 0);
2693                 drbd_bm_cleanup(mdev);
2694         }
2695
2696         drbd_free_resources(mdev);
2697
2698         /*
2699          * currently we call drbd_init_ee only on module load, so
2700          * we may call drbd_release_ee only on module unload!
2701          */
2702         D_ASSERT(list_empty(&mdev->active_ee));
2703         D_ASSERT(list_empty(&mdev->sync_ee));
2704         D_ASSERT(list_empty(&mdev->done_ee));
2705         D_ASSERT(list_empty(&mdev->read_ee));
2706         D_ASSERT(list_empty(&mdev->net_ee));
2707         D_ASSERT(list_empty(&mdev->resync_reads));
2708         D_ASSERT(list_empty(&mdev->data.work.q));
2709         D_ASSERT(list_empty(&mdev->meta.work.q));
2710         D_ASSERT(list_empty(&mdev->resync_work.list));
2711         D_ASSERT(list_empty(&mdev->unplug_work.list));
2712
2713 }
2714
2715
2716 static void drbd_destroy_mempools(void)
2717 {
2718         struct page *page;
2719
2720         while (drbd_pp_pool) {
2721                 page = drbd_pp_pool;
2722                 drbd_pp_pool = (struct page *)page_private(page);
2723                 __free_page(page);
2724                 drbd_pp_vacant--;
2725         }
2726
2727         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2728
2729         if (drbd_ee_mempool)
2730                 mempool_destroy(drbd_ee_mempool);
2731         if (drbd_request_mempool)
2732                 mempool_destroy(drbd_request_mempool);
2733         if (drbd_ee_cache)
2734                 kmem_cache_destroy(drbd_ee_cache);
2735         if (drbd_request_cache)
2736                 kmem_cache_destroy(drbd_request_cache);
2737         if (drbd_bm_ext_cache)
2738                 kmem_cache_destroy(drbd_bm_ext_cache);
2739         if (drbd_al_ext_cache)
2740                 kmem_cache_destroy(drbd_al_ext_cache);
2741
2742         drbd_ee_mempool      = NULL;
2743         drbd_request_mempool = NULL;
2744         drbd_ee_cache        = NULL;
2745         drbd_request_cache   = NULL;
2746         drbd_bm_ext_cache    = NULL;
2747         drbd_al_ext_cache    = NULL;
2748
2749         return;
2750 }
2751
2752 static int drbd_create_mempools(void)
2753 {
2754         struct page *page;
2755         const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
2756         int i;
2757
2758         /* prepare our caches and mempools */
2759         drbd_request_mempool = NULL;
2760         drbd_ee_cache        = NULL;
2761         drbd_request_cache   = NULL;
2762         drbd_bm_ext_cache    = NULL;
2763         drbd_al_ext_cache    = NULL;
2764         drbd_pp_pool         = NULL;
2765
2766         /* caches */
2767         drbd_request_cache = kmem_cache_create(
2768                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2769         if (drbd_request_cache == NULL)
2770                 goto Enomem;
2771
2772         drbd_ee_cache = kmem_cache_create(
2773                 "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
2774         if (drbd_ee_cache == NULL)
2775                 goto Enomem;
2776
2777         drbd_bm_ext_cache = kmem_cache_create(
2778                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2779         if (drbd_bm_ext_cache == NULL)
2780                 goto Enomem;
2781
2782         drbd_al_ext_cache = kmem_cache_create(
2783                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2784         if (drbd_al_ext_cache == NULL)
2785                 goto Enomem;
2786
2787         /* mempools */
2788         drbd_request_mempool = mempool_create(number,
2789                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2790         if (drbd_request_mempool == NULL)
2791                 goto Enomem;
2792
2793         drbd_ee_mempool = mempool_create(number,
2794                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2795         if (drbd_ee_mempool == NULL)
2796                 goto Enomem;
2797
2798         /* drbd's page pool */
2799         spin_lock_init(&drbd_pp_lock);
2800
2801         for (i = 0; i < number; i++) {
2802                 page = alloc_page(GFP_HIGHUSER);
2803                 if (!page)
2804                         goto Enomem;
2805                 set_page_private(page, (unsigned long)drbd_pp_pool);
2806                 drbd_pp_pool = page;
2807         }
2808         drbd_pp_vacant = number;
2809
2810         return 0;
2811
2812 Enomem:
2813         drbd_destroy_mempools(); /* in case we allocated some */
2814         return -ENOMEM;
2815 }
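
/*
 * Usage sketch for the pools above (illustrative): objects must be
 * returned to the pool they were taken from, e.g. for requests:
 *
 *	struct drbd_request *req =
 *		mempool_alloc(drbd_request_mempool, GFP_NOIO);
 *	...
 *	mempool_free(req, drbd_request_mempool);	(on completion)
 */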
2816
2817 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2818         void *unused)
2819 {
2820         /* just so we have it.  you never know what interesting things we
2821          * might want to do here some day...
2822          */
2823
2824         return NOTIFY_DONE;
2825 }
2826
2827 static struct notifier_block drbd_notifier = {
2828         .notifier_call = drbd_notify_sys,
2829 };
2830
2831 static void drbd_release_ee_lists(struct drbd_conf *mdev)
2832 {
2833         int rr;
2834
2835         rr = drbd_release_ee(mdev, &mdev->active_ee);
2836         if (rr)
2837                 dev_err(DEV, "%d EEs in active list found!\n", rr);
2838
2839         rr = drbd_release_ee(mdev, &mdev->sync_ee);
2840         if (rr)
2841                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2842
2843         rr = drbd_release_ee(mdev, &mdev->read_ee);
2844         if (rr)
2845                 dev_err(DEV, "%d EEs in read list found!\n", rr);
2846
2847         rr = drbd_release_ee(mdev, &mdev->done_ee);
2848         if (rr)
2849                 dev_err(DEV, "%d EEs in done list found!\n", rr);
2850
2851         rr = drbd_release_ee(mdev, &mdev->net_ee);
2852         if (rr)
2853                 dev_err(DEV, "%d EEs in net list found!\n", rr);
2854 }
2855
2856 /* caution. no locking.
2857  * currently only used from module cleanup code. */
2858 static void drbd_delete_device(unsigned int minor)
2859 {
2860         struct drbd_conf *mdev = minor_to_mdev(minor);
2861
2862         if (!mdev)
2863                 return;
2864
2865         /* paranoia asserts */
2866         if (mdev->open_cnt != 0)
2867                 dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
2868                                 __FILE__ , __LINE__);
2869
2870         ERR_IF (!list_empty(&mdev->data.work.q)) {
2871                 struct list_head *lp;
2872                 list_for_each(lp, &mdev->data.work.q) {
2873                         dev_err(DEV, "lp = %p\n", lp);
2874                 }
2875         }
2876         /* end paranoia asserts */
2877
2878         del_gendisk(mdev->vdisk);
2879
2880         /* cleanup stuff that may have been allocated during
2881          * device (re-)configuration or state changes */
2882
2883         if (mdev->this_bdev)
2884                 bdput(mdev->this_bdev);
2885
2886         drbd_free_resources(mdev);
2887
2888         drbd_release_ee_lists(mdev);
2889
2890         /* should be free'd on disconnect? */
2891         kfree(mdev->ee_hash);
2892         /*
2893         mdev->ee_hash_s = 0;
2894         mdev->ee_hash = NULL;
2895         */
2896
2897         lc_destroy(mdev->act_log);
2898         lc_destroy(mdev->resync);
2899
2900         kfree(mdev->p_uuid);
2901         /* mdev->p_uuid = NULL; */
2902
2903         kfree(mdev->int_dig_out);
2904         kfree(mdev->int_dig_in);
2905         kfree(mdev->int_dig_vv);
2906
2907         /* cleanup the rest that has been
2908          * allocated from drbd_new_device
2909          * and actually free the mdev itself */
2910         drbd_free_mdev(mdev);
2911 }
2912
2913 static void drbd_cleanup(void)
2914 {
2915         unsigned int i;
2916
2917         unregister_reboot_notifier(&drbd_notifier);
2918
2919         drbd_nl_cleanup();
2920
2921         if (minor_table) {
2922                 if (drbd_proc)
2923                         remove_proc_entry("drbd", NULL);
2924                 i = minor_count;
2925                 while (i--)
2926                         drbd_delete_device(i);
2927                 drbd_destroy_mempools();
2928         }
2929
2930         kfree(minor_table);
2931
2932         unregister_blkdev(DRBD_MAJOR, "drbd");
2933
2934         printk(KERN_INFO "drbd: module cleanup done.\n");
2935 }
2936
2937 /**
2938  * drbd_congested() - Callback for pdflush
2939  * @congested_data:     User data
2940  * @bdi_bits:           Bits pdflush is currently interested in
2941  *
2942  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2943  */
2944 static int drbd_congested(void *congested_data, int bdi_bits)
2945 {
2946         struct drbd_conf *mdev = congested_data;
2947         struct request_queue *q;
2948         char reason = '-';
2949         int r = 0;
2950
2951         if (!__inc_ap_bio_cond(mdev)) {
2952                 /* DRBD has frozen IO */
2953                 r = bdi_bits;
2954                 reason = 'd';
2955                 goto out;
2956         }
2957
2958         if (get_ldev(mdev)) {
2959                 q = bdev_get_queue(mdev->ldev->backing_bdev);
2960                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2961                 put_ldev(mdev);
2962                 if (r)
2963                         reason = 'b';
2964         }
2965
2966         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
2967                 r |= (1 << BDI_async_congested);
2968                 reason = reason == 'b' ? 'a' : 'n';
2969         }
2970
2971 out:
2972         mdev->congestion_reason = reason;
2973         return r;
2974 }
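
/* For reference: this hook is wired up via q->backing_dev_info in
 * drbd_new_device() below.  Writeback effectively polls it like this
 * (sketch, not drbd code):
 *
 *	if (bdi_congested(&q->backing_dev_info, 1 << BDI_async_congested))
 *		;	/* back off and retry writeback later */
 *
 * which ends up here with bdi_bits == 1 << BDI_async_congested.
 */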
2975
2976 struct drbd_conf *drbd_new_device(unsigned int minor)
2977 {
2978         struct drbd_conf *mdev;
2979         struct gendisk *disk;
2980         struct request_queue *q;
2981
2982         /* GFP_KERNEL, we are outside of all write-out paths */
2983         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2984         if (!mdev)
2985                 return NULL;
2986         if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
2987                 goto out_no_cpumask;
2988
2989         mdev->minor = minor;
2990
2991         drbd_init_set_defaults(mdev);
2992
2993         q = blk_alloc_queue(GFP_KERNEL);
2994         if (!q)
2995                 goto out_no_q;
2996         mdev->rq_queue = q;
2997         q->queuedata   = mdev;
2998         blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
2999
3000         disk = alloc_disk(1);
3001         if (!disk)
3002                 goto out_no_disk;
3003         mdev->vdisk = disk;
3004
3005         set_disk_ro(disk, TRUE);
3006
3007         disk->queue = q;
3008         disk->major = DRBD_MAJOR;
3009         disk->first_minor = minor;
3010         disk->fops = &drbd_ops;
3011         sprintf(disk->disk_name, "drbd%d", minor);
3012         disk->private_data = mdev;
3013
3014         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
3015         /* we have no partitions. we contain only ourselves. */
3016         mdev->this_bdev->bd_contains = mdev->this_bdev;
3017
3018         q->backing_dev_info.congested_fn = drbd_congested;
3019         q->backing_dev_info.congested_data = mdev;
3020
3021         blk_queue_make_request(q, drbd_make_request_26);
3022         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
3023         blk_queue_merge_bvec(q, drbd_merge_bvec);
3024         q->queue_lock = &mdev->req_lock; /* needed since we use */
3025                 /* plugging on a queue that actually has no requests! */
3026         q->unplug_fn = drbd_unplug_fn;
3027
3028         mdev->md_io_page = alloc_page(GFP_KERNEL);
3029         if (!mdev->md_io_page)
3030                 goto out_no_io_page;
3031
3032         if (drbd_bm_init(mdev))
3033                 goto out_no_bitmap;
3034         /* no need to lock access, we are still initializing this minor device. */
3035         if (!tl_init(mdev))
3036                 goto out_no_tl;
3037
3038         mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
3039         if (!mdev->app_reads_hash)
3040                 goto out_no_app_reads;
3041
3042         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
3043         if (!mdev->current_epoch)
3044                 goto out_no_epoch;
3045
3046         INIT_LIST_HEAD(&mdev->current_epoch->list);
3047         mdev->epochs = 1;
3048
3049         return mdev;
3050
3051 /* out_whatever_else:
3052         kfree(mdev->current_epoch); */
3053 out_no_epoch:
3054         kfree(mdev->app_reads_hash);
3055 out_no_app_reads:
3056         tl_cleanup(mdev);
3057 out_no_tl:
3058         drbd_bm_cleanup(mdev);
3059 out_no_bitmap:
3060         __free_page(mdev->md_io_page);
3061 out_no_io_page:
3062         put_disk(disk);
3063 out_no_disk:
3064         blk_cleanup_queue(q);
3065 out_no_q:
3066         free_cpumask_var(mdev->cpu_mask);
3067 out_no_cpumask:
3068         kfree(mdev);
3069         return NULL;
3070 }
3071
3072 /* counterpart of drbd_new_device.
3073  * last part of drbd_delete_device. */
3074 void drbd_free_mdev(struct drbd_conf *mdev)
3075 {
3076         kfree(mdev->current_epoch);
3077         kfree(mdev->app_reads_hash);
3078         tl_cleanup(mdev);
3079         if (mdev->bitmap) /* should no longer be there. */
3080                 drbd_bm_cleanup(mdev);
3081         __free_page(mdev->md_io_page);
3082         put_disk(mdev->vdisk);
3083         blk_cleanup_queue(mdev->rq_queue);
3084         free_cpumask_var(mdev->cpu_mask);
3085         kfree(mdev);
3086 }
3087
3088
3089 int __init drbd_init(void)
3090 {
3091         int err;
3092
3093         if (sizeof(struct p_handshake) != 80) {
3094                 printk(KERN_ERR
3095                        "drbd: never change the size or layout "
3096                        "of the HandShake packet.\n");
3097                 return -EINVAL;
3098         }
3099
3100         if (1 > minor_count || minor_count > 255) {
3101                 printk(KERN_ERR
3102                         "drbd: invalid minor_count (%d)\n", minor_count);
3103 #ifdef MODULE
3104                 return -EINVAL;
3105 #else
3106                 minor_count = 8;
3107 #endif
3108         }
3109
3110         err = drbd_nl_init();
3111         if (err)
3112                 return err;
3113
3114         err = register_blkdev(DRBD_MAJOR, "drbd");
3115         if (err) {
3116                 printk(KERN_ERR
3117                        "drbd: unable to register block device major %d\n",
3118                        DRBD_MAJOR);
3119                 return err;
3120         }
3121
3122         register_reboot_notifier(&drbd_notifier);
3123
3124         /*
3125          * allocate all necessary structs
3126          */
3127         err = -ENOMEM;
3128
3129         init_waitqueue_head(&drbd_pp_wait);
3130
3131         drbd_proc = NULL; /* play safe for drbd_cleanup */
3132         minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
3133                                 GFP_KERNEL);
3134         if (!minor_table)
3135                 goto Enomem;
3136
3137         err = drbd_create_mempools();
3138         if (err)
3139                 goto Enomem;
3140
3141         drbd_proc = proc_create("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops);
3142         if (!drbd_proc) {
3143                 printk(KERN_ERR "drbd: unable to register proc file\n");
3144                 goto Enomem;
3145         }
3146
3147         rwlock_init(&global_state_lock);
3148
3149         printk(KERN_INFO "drbd: initialized. "
3150                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
3151                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
3152         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
3153         printk(KERN_INFO "drbd: registered as block device major %d\n",
3154                 DRBD_MAJOR);
3155         printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);
3156
3157         return 0; /* Success! */
3158
3159 Enomem:
3160         drbd_cleanup();
3161         if (err == -ENOMEM)
3162                 /* currently always the case */
3163                 printk(KERN_ERR "drbd: ran out of memory\n");
3164         else
3165                 printk(KERN_ERR "drbd: initialization failure\n");
3166         return err;
3167 }
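
/* Example module load (sketch; minor_count is assumed to be exposed as a
 * module parameter, as the fallback above suggests):
 *
 *	modprobe drbd minor_count=16
 *
 * On success this registers block major DRBD_MAJOR and creates /proc/drbd;
 * the individual minors are only set up later via drbd_new_device().
 */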
3168
3169 void drbd_free_bc(struct drbd_backing_dev *ldev)
3170 {
3171         if (ldev == NULL)
3172                 return;
3173
3174         bd_release(ldev->backing_bdev);
3175         bd_release(ldev->md_bdev);
3176
3177         fput(ldev->lo_file);
3178         fput(ldev->md_file);
3179
3180         kfree(ldev);
3181 }
3182
3183 void drbd_free_sock(struct drbd_conf *mdev)
3184 {
3185         if (mdev->data.socket) {
3186                 kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
3187                 sock_release(mdev->data.socket);
3188                 mdev->data.socket = NULL;
3189         }
3190         if (mdev->meta.socket) {
3191                 kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
3192                 sock_release(mdev->meta.socket);
3193                 mdev->meta.socket = NULL;
3194         }
3195 }
3196
3197
3198 void drbd_free_resources(struct drbd_conf *mdev)
3199 {
3200         crypto_free_hash(mdev->csums_tfm);
3201         mdev->csums_tfm = NULL;
3202         crypto_free_hash(mdev->verify_tfm);
3203         mdev->verify_tfm = NULL;
3204         crypto_free_hash(mdev->cram_hmac_tfm);
3205         mdev->cram_hmac_tfm = NULL;
3206         crypto_free_hash(mdev->integrity_w_tfm);
3207         mdev->integrity_w_tfm = NULL;
3208         crypto_free_hash(mdev->integrity_r_tfm);
3209         mdev->integrity_r_tfm = NULL;
3210
3211         drbd_free_sock(mdev);
3212
3213         __no_warn(local,
3214                   drbd_free_bc(mdev->ldev);
3215                   mdev->ldev = NULL;);
3216 }
3217
3218 /* meta data management */
3219
3220 struct meta_data_on_disk {
3221         u64 la_size;           /* last agreed size. */
3222         u64 uuid[UI_SIZE];   /* UUIDs. */
3223         u64 device_uuid;
3224         u64 reserved_u64_1;
3225         u32 flags;             /* MDF */
3226         u32 magic;
3227         u32 md_size_sect;
3228         u32 al_offset;         /* offset to this block */
3229         u32 al_nr_extents;     /* important for restoring the AL */
3230               /* `-- act_log->nr_elements <-- sync_conf.al_extents */
3231         u32 bm_offset;         /* offset to the bitmap, from here */
3232         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
3233         u32 reserved_u32[4];
3234
3235 } __packed;
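
/* All multi-byte fields of struct meta_data_on_disk are stored big-endian;
 * see the cpu_to_be*() conversions in drbd_md_sync() below and the matching
 * be*_to_cpu() conversions in drbd_md_read().
 */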
3236
3237 /**
3238  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
3239  * @mdev:       DRBD device.
3240  */
3241 void drbd_md_sync(struct drbd_conf *mdev)
3242 {
3243         struct meta_data_on_disk *buffer;
3244         sector_t sector;
3245         int i;
3246
3247         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
3248                 return;
3249         del_timer(&mdev->md_sync_timer);
3250
3251         /* We use D_FAILED and not D_ATTACHING here, because we try to write
3252          * metadata even if we detach due to a disk failure! */
3253         if (!get_ldev_if_state(mdev, D_FAILED))
3254                 return;
3255
3256         trace_drbd_md_io(mdev, WRITE, mdev->ldev);
3257
3258         mutex_lock(&mdev->md_io_mutex);
3259         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3260         memset(buffer, 0, 512);
3261
3262         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
3263         for (i = UI_CURRENT; i < UI_SIZE; i++)
3264                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
3265         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
3266         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
3267
3268         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
3269         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
3270         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
3271         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
3272         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
3273
3274         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
3275
3276         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
3277         sector = mdev->ldev->md.md_offset;
3278
3279         if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
3280                 clear_bit(MD_DIRTY, &mdev->flags);
3281         } else {
3282                 /* this was a try anyway ... */
3283                 dev_err(DEV, "meta data update failed!\n");
3284
3285                 drbd_chk_io_error(mdev, 1, TRUE);
3286         }
3287
3288         /* Update mdev->ldev->md.la_size_sect,
3289          * since we just wrote it to the meta data. */
3290         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
3291
3292         mutex_unlock(&mdev->md_io_mutex);
3293         put_ldev(mdev);
3294 }
3295
3296 /**
3297  * drbd_md_read() - Reads in the meta data super block
3298  * @mdev:       DRBD device.
3299  * @bdev:       Device from which the meta data should be read in.
3300  *
3301  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
3302  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
3303  */
3304 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
3305 {
3306         struct meta_data_on_disk *buffer;
3307         int i, rv = NO_ERROR;
3308
3309         if (!get_ldev_if_state(mdev, D_ATTACHING))
3310                 return ERR_IO_MD_DISK;
3311
3312         trace_drbd_md_io(mdev, READ, bdev);
3313
3314         mutex_lock(&mdev->md_io_mutex);
3315         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
3316
3317         if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
3318                 /* NOTE: can't do normal error processing here as this is
3319                    called BEFORE disk is attached */
3320                 dev_err(DEV, "Error while reading metadata.\n");
3321                 rv = ERR_IO_MD_DISK;
3322                 goto err;
3323         }
3324
3325         if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
3326                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
3327                 rv = ERR_MD_INVALID;
3328                 goto err;
3329         }
3330         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
3331                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
3332                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
3333                 rv = ERR_MD_INVALID;
3334                 goto err;
3335         }
3336         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
3337                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
3338                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
3339                 rv = ERR_MD_INVALID;
3340                 goto err;
3341         }
3342         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
3343                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
3344                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
3345                 rv = ERR_MD_INVALID;
3346                 goto err;
3347         }
3348
3349         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
3350                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
3351                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
3352                 rv = ERR_MD_INVALID;
3353                 goto err;
3354         }
3355
3356         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
3357         for (i = UI_CURRENT; i < UI_SIZE; i++)
3358                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
3359         bdev->md.flags = be32_to_cpu(buffer->flags);
3360         mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
3361         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
3362
3363         if (mdev->sync_conf.al_extents < 7)
3364                 mdev->sync_conf.al_extents = 127; /* bogus on-disk value; fall back to the default */
3365
3366  err:
3367         mutex_unlock(&mdev->md_io_mutex);
3368         put_ldev(mdev);
3369
3370         return rv;
3371 }
3372
3373 /**
3374  * drbd_md_mark_dirty() - Mark meta data super block as dirty
3375  * @mdev:       DRBD device.
3376  *
3377  * Call this function if you change anything that should be written to
3378  * the meta-data super block. This function sets MD_DIRTY, and starts a
3379  * timer that ensures that drbd_md_sync() is called within five seconds.
3380  */
3381 void drbd_md_mark_dirty(struct drbd_conf *mdev)
3382 {
3383         set_bit(MD_DIRTY, &mdev->flags);
3384         mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
3385 }
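
/* Typical usage pattern (drbd_md_set_flag() below is a real instance):
 * mutate the in-core meta data first, then mark it dirty; the timer
 * bounds how long the change may stay unwritten, e.g.:
 *
 *	mdev->ldev->md.flags |= MDF_FULL_SYNC;
 *	drbd_md_mark_dirty(mdev);
 */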
3386
3387
3388 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
3389 {
3390         int i;
3391
3392         for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
3393                 mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
3394
3395                 trace_drbd_uuid(mdev, i+1);
3396         }
3397 }
3398
3399 void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3400 {
3401         if (idx == UI_CURRENT) {
3402                 if (mdev->state.role == R_PRIMARY)
3403                         val |= 1;
3404                 else
3405                         val &= ~((u64)1);
3406
3407                 drbd_set_ed_uuid(mdev, val);
3408         }
3409
3410         mdev->ldev->md.uuid[idx] = val;
3411         trace_drbd_uuid(mdev, idx);
3412         drbd_md_mark_dirty(mdev);
3413 }
3414
3415
3416 void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
3417 {
3418         if (mdev->ldev->md.uuid[idx]) {
3419                 drbd_uuid_move_history(mdev);
3420                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
3421                 trace_drbd_uuid(mdev, UI_HISTORY_START);
3422         }
3423         _drbd_uuid_set(mdev, idx, val);
3424 }
3425
3426 /**
3427  * drbd_uuid_new_current() - Creates a new current UUID
3428  * @mdev:       DRBD device.
3429  *
3430  * Creates a new current UUID, and rotates the old current UUID into
3431  * the bitmap slot. Causes an incremental resync upon next connect.
3432  */
3433 void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
3434 {
3435         u64 val;
3436
3437         dev_info(DEV, "Creating new current UUID\n");
3438         D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
3439         mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
3440         trace_drbd_uuid(mdev, UI_BITMAP);
3441
3442         get_random_bytes(&val, sizeof(u64));
3443         _drbd_uuid_set(mdev, UI_CURRENT, val);
3444 }
3445
3446 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
3447 {
3448         if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
3449                 return;
3450
3451         if (val == 0) {
3452                 drbd_uuid_move_history(mdev);
3453                 mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
3454                 mdev->ldev->md.uuid[UI_BITMAP] = 0;
3455                 trace_drbd_uuid(mdev, UI_HISTORY_START);
3456                 trace_drbd_uuid(mdev, UI_BITMAP);
3457         } else {
3458                 if (mdev->ldev->md.uuid[UI_BITMAP])
3459                         dev_warn(DEV, "bm UUID already set");
3460
3461                 mdev->ldev->md.uuid[UI_BITMAP] = val;
3462                 mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);
3463
3464                 trace_drbd_uuid(mdev, UI_BITMAP);
3465         }
3466         drbd_md_mark_dirty(mdev);
3467 }
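
/* The UUID slots act like a small shift register.  For example, setting a
 * new current UUID via drbd_uuid_set(mdev, UI_CURRENT, val) rotates
 * (sketch of the data flow implemented above):
 *
 *	UI_HISTORY_START+1 ... UI_HISTORY_END  <-  older history entries
 *	UI_HISTORY_START                       <-  old UI_CURRENT
 *	UI_CURRENT                             <-  val (low bit = primary role)
 */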
3468
3469 /**
3470  * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3471  * @mdev:       DRBD device.
3472  *
3473  * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
3474  */
3475 int drbd_bmio_set_n_write(struct drbd_conf *mdev)
3476 {
3477         int rv = -EIO;
3478
3479         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3480                 drbd_md_set_flag(mdev, MDF_FULL_SYNC);
3481                 drbd_md_sync(mdev);
3482                 drbd_bm_set_all(mdev);
3483
3484                 rv = drbd_bm_write(mdev);
3485
3486                 if (!rv) {
3487                         drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
3488                         drbd_md_sync(mdev);
3489                 }
3490
3491                 put_ldev(mdev);
3492         }
3493
3494         return rv;
3495 }
3496
3497 /**
3498  * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
3499  * @mdev:       DRBD device.
3500  *
3501  * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
3502  */
3503 int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
3504 {
3505         int rv = -EIO;
3506
3507         if (get_ldev_if_state(mdev, D_ATTACHING)) {
3508                 drbd_bm_clear_all(mdev);
3509                 rv = drbd_bm_write(mdev);
3510                 put_ldev(mdev);
3511         }
3512
3513         return rv;
3514 }
3515
3516 static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3517 {
3518         struct bm_io_work *work = container_of(w, struct bm_io_work, w);
3519         int rv;
3520
3521         D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);
3522
3523         drbd_bm_lock(mdev, work->why);
3524         rv = work->io_fn(mdev);
3525         drbd_bm_unlock(mdev);
3526
3527         clear_bit(BITMAP_IO, &mdev->flags);
3528         wake_up(&mdev->misc_wait);
3529
3530         if (work->done)
3531                 work->done(mdev, rv);
3532
3533         clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
3534         work->why = NULL;
3535
3536         return 1;
3537 }
3538
3539 /**
3540  * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
3541  * @mdev:       DRBD device.
3542  * @io_fn:      IO callback to be called when bitmap IO is possible
3543  * @done:       callback to be called after the bitmap IO was performed
3544  * @why:        Descriptive text of the reason for doing the IO
3545  *
3546  * While IO on the bitmap is in progress, application IO is frozen; this
3547  * ensures that drbd_set_out_of_sync() cannot be called. This function MAY ONLY be
3548  * called from worker context. It MUST NOT be used while a previous such
3549  * work is still pending!
3550  */
3551 void drbd_queue_bitmap_io(struct drbd_conf *mdev,
3552                           int (*io_fn)(struct drbd_conf *),
3553                           void (*done)(struct drbd_conf *, int),
3554                           char *why)
3555 {
3556         D_ASSERT(current == mdev->worker.task);
3557
3558         D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
3559         D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
3560         D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
3561         if (mdev->bm_io_work.why)
3562                 dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
3563                         why, mdev->bm_io_work.why);
3564
3565         mdev->bm_io_work.io_fn = io_fn;
3566         mdev->bm_io_work.done = done;
3567         mdev->bm_io_work.why = why;
3568
3569         set_bit(BITMAP_IO, &mdev->flags);
3570         if (atomic_read(&mdev->ap_bio_cnt) == 0) {
3571                 if (list_empty(&mdev->bm_io_work.w.list)) {
3572                         set_bit(BITMAP_IO_QUEUED, &mdev->flags);
3573                         drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
3574                 } else
3575                         dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
3576         }
3577 }
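
/* A minimal sketch of asynchronous use from worker context (the done
 * callback name is hypothetical; drbd_bmio_set_n_write() is defined above):
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *			     my_done_fn, "example: set all bits");
 *
 * my_done_fn(mdev, rv) is then invoked by the worker once the whole
 * bitmap has been written out (rv != 0 on failure).
 */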
3578
3579 /**
3580  * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
3581  * @mdev:       DRBD device.
3582  * @io_fn:      IO callback to be called when bitmap IO is possible
3583  * @why:        Descriptive text of the reason for doing the IO
3584  *
3585  * Freezes application IO while the actual IO operation runs. This
3586  * function MAY NOT be called from worker context.
3587  */
3588 int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
3589 {
3590         int rv;
3591
3592         D_ASSERT(current != mdev->worker.task);
3593
3594         drbd_suspend_io(mdev);
3595
3596         drbd_bm_lock(mdev, why);
3597         rv = io_fn(mdev);
3598         drbd_bm_unlock(mdev);
3599
3600         drbd_resume_io(mdev);
3601
3602         return rv;
3603 }
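
/* The synchronous counterpart, usable e.g. from netlink context (sketch;
 * the "why" string is free-form descriptive text):
 *
 *	rv = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
 *			    "example: clear all bits");
 *
 * Application IO stays suspended for the duration of io_fn.
 */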
3604
3605 void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3606 {
3607         if ((mdev->ldev->md.flags & flag) != flag) {
3608                 drbd_md_mark_dirty(mdev);
3609                 mdev->ldev->md.flags |= flag;
3610         }
3611 }
3612
3613 void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
3614 {
3615         if ((mdev->ldev->md.flags & flag) != 0) {
3616                 drbd_md_mark_dirty(mdev);
3617                 mdev->ldev->md.flags &= ~flag;
3618         }
3619 }
3620 int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
3621 {
3622         return (bdev->md.flags & flag) != 0;
3623 }
3624
3625 static void md_sync_timer_fn(unsigned long data)
3626 {
3627         struct drbd_conf *mdev = (struct drbd_conf *) data;
3628
3629         drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
3630 }
3631
3632 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
3633 {
3634         dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
3635         drbd_md_sync(mdev);
3636
3637         return 1;
3638 }
3639
3640 #ifdef CONFIG_DRBD_FAULT_INJECTION
3641 /* Fault insertion support including random number generator shamelessly
3642  * stolen from kernel/rcutorture.c */
3643 struct fault_random_state {
3644         unsigned long state;
3645         unsigned long count;
3646 };
3647
3648 #define FAULT_RANDOM_MULT 39916801  /* prime */
3649 #define FAULT_RANDOM_ADD        479001701 /* prime */
3650 #define FAULT_RANDOM_REFRESH 10000
3651
3652 /*
3653  * Crude but fast random-number generator.  Uses a linear congruential
3654  * generator, with occasional help from get_random_bytes().
3655  */
3656 static unsigned long
3657 _drbd_fault_random(struct fault_random_state *rsp)
3658 {
3659         long refresh;
3660
3661         if (--rsp->count < 0) {
3662                 get_random_bytes(&refresh, sizeof(refresh));
3663                 rsp->state += refresh;
3664                 rsp->count = FAULT_RANDOM_REFRESH;
3665         }
3666         rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
3667         return swahw32(rsp->state);
3668 }
3669
3670 static char *
3671 _drbd_fault_str(unsigned int type) {
3672         static char *_faults[] = {
3673                 [DRBD_FAULT_MD_WR] = "Meta-data write",
3674                 [DRBD_FAULT_MD_RD] = "Meta-data read",
3675                 [DRBD_FAULT_RS_WR] = "Resync write",
3676                 [DRBD_FAULT_RS_RD] = "Resync read",
3677                 [DRBD_FAULT_DT_WR] = "Data write",
3678                 [DRBD_FAULT_DT_RD] = "Data read",
3679                 [DRBD_FAULT_DT_RA] = "Data read ahead",
3680                 [DRBD_FAULT_BM_ALLOC] = "BM allocation",
3681                 [DRBD_FAULT_AL_EE] = "EE allocation"
3682         };
3683
3684         return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
3685 }
3686
3687 unsigned int
3688 _drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
3689 {
3690         static struct fault_random_state rrs = {0, 0};
3691
3692         unsigned int ret = (
3693                 (fault_devs == 0 ||
3694                         ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
3695                 (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));
3696
3697         if (ret) {
3698                 fault_count++;
3699
3700                 if (printk_ratelimit())
3701                         dev_warn(DEV, "***Simulating %s failure\n",
3702                                 _drbd_fault_str(type));
3703         }
3704
3705         return ret;
3706 }
3707 #endif
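
/* A sketch of how an IO submission path might consult the hook above when
 * CONFIG_DRBD_FAULT_INJECTION is enabled (the call site shown here is an
 * assumption, not a quote from drbd's IO path):
 *
 *	if (_drbd_insert_fault(mdev, DRBD_FAULT_DT_WR)) {
 *		bio_endio(bio, -EIO);	/* pretend the data write failed */
 *		return;
 *	}
 */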
3708
3709 const char *drbd_buildtag(void)
3710 {
3711         /* When DRBD is built from external sources, this holds a reference
3712            to the git hash of that source code. */
3713
3714         static char buildtag[38] = "\0uilt-in"; /* 'b' or srcversion is filled in below */
3715
3716         if (buildtag[0] == 0) {
3717 #ifdef CONFIG_MODULES
3718                 if (THIS_MODULE != NULL)
3719                         sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
3720                 else
3721 #endif
3722                         buildtag[0] = 'b';
3723         }
3724
3725         return buildtag;
3726 }
3727
3728 module_init(drbd_init)
3729 module_exit(drbd_cleanup)
3730
3731 /* For drbd_tracing: */
3732 EXPORT_SYMBOL(drbd_conn_str);
3733 EXPORT_SYMBOL(drbd_role_str);
3734 EXPORT_SYMBOL(drbd_disk_str);
3735 EXPORT_SYMBOL(drbd_set_st_err_str);