drbd: Code de-duplication; new function apply_mask_val()
drivers/block/drbd/drbd_state.c
/*
   drbd_state.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h"

struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};

extern void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
int drbd_send_state_req(struct drbd_conf *, union drbd_state, union drbd_state);
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static void after_conn_state_ch(struct drbd_tconn *tconn, union drbd_state os,
				union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_conf *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort);

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}

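/*
 * Example (illustrative, not driver logic): promoting the local node
 * while both sides are connected is one of the cluster wide changes
 * recognized above:
 *
 *	os = { .conn = C_CONNECTED, .role = R_SECONDARY, ... };
 *	ns = { .conn = C_CONNECTED, .role = R_PRIMARY, ... };
 *	cl_wide_st_chg(mdev, os, ns) => true, the peer must agree first
 *
 * whereas a purely local change, e.g. toggling one of the isp bits,
 * is not in the list above, returns false, and may be committed
 * without asking the peer.
 */
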
static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}

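/*
 * Example (illustrative): apply_mask_val() is a plain bit merge,
 *
 *	ns = apply_mask_val(os, mask, val);
 *	=> ns.i == (os.i & ~mask.i) | val.i
 *
 * so with a mask/val pair built by the NS() helper from drbd_int.h,
 * e.g. NS(conn, C_DISCONNECTING), only the .conn field of the result
 * differs from os; all unmasked bits keep their old value.
 */
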
enum drbd_state_rv
drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	ns = apply_mask_val(mdev->state, mask, val);
	rv = _drbd_set_state(mdev, ns, f, NULL);
	ns = mdev->state;
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}

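/*
 * Usage sketch (illustrative): forcing the disk state after a detected
 * IO failure, bypassing the soft checks:
 *
 *	drbd_force_state(mdev, NS(disk, D_FAILED));
 *
 * With CS_HARD set, __drbd_set_state() below still runs
 * is_valid_transition(), but skips is_valid_state() and
 * is_valid_soft_transition().
 */
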
static enum drbd_state_rv
_req_st_cond(struct drbd_conf *mdev, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = mdev->state;
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv == SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* cont waiting, otherwise fail. */

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns);
			if (rv == SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	return rv;
}

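/*
 * Note: SS_UNKNOWN_ERROR is 0, so returning it from _req_st_cond()
 * keeps the wait_event() in drbd_req_state() below asleep; any other
 * return value (a success code or a real error) is non-zero and wakes
 * the waiter up.
 */
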
/**
 * drbd_req_state() - Perform a possibly cluster-wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_conf *mdev, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->tconn->req_lock, flags);
	os = mdev->state;
	ns = sanitize_state(mdev, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		/* do not jump to "abort" with the lock still held */
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns);
		spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->tconn->req_lock, flags);
		ns = apply_mask_val(mdev->state, mask, val);
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->tconn->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}

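/*
 * Usage sketch (illustrative): most callers go through the
 * drbd_request_state() inline wrapper from drbd_int.h, which adds
 * CS_VERBOSE and CS_ORDERED, e.g.
 *
 *	rv = drbd_request_state(mdev, NS(conn, C_DISCONNECTING));
 *	if (rv < SS_SUCCESS)
 *		... the request was declined; with CS_VERBOSE the
 *		    reason was already logged by print_st_err() ...
 */
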
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev, union drbd_state os,
		  union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}

/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev->tconn)) {
		if (!mdev->tconn->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;
		put_net_conf(mdev->tconn);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->tconn->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

	return rv;
}

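/*
 * Example (illustrative): with two_primaries disabled in the net
 * config, any state with ns.role == R_PRIMARY and ns.peer == R_PRIMARY
 * is refused as SS_TWO_PRIMARIES, no matter how we would get there:
 * is_valid_state() judges only the resulting state, never the
 * transition (that is what the *_transition() checks below are for).
 */
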
/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * @os:		old state.
 * @ns:		new state.
 *
 * This function limits state transitions that may be declined by DRBD,
 * i.e. user requests (aka soft transitions).
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	return rv;
}

static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	/* A network error cannot happen while the network part of the
	 * device is not even configured */
	if ((nc >= C_TIMEOUT && nc <= C_TEAR_DOWN) && oc <= C_DISCONNECTING)
		rv = SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		rv = SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		rv = SS_IN_TRANSIENT_STATE;

	return rv;
}

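/*
 * Examples (illustrative) of what the rules above allow and refuse:
 *
 *	C_STANDALONE    -> C_TIMEOUT      refused, SS_NEED_CONNECTION
 *	C_TIMEOUT       -> C_CONNECTED    refused, SS_IN_TRANSIENT_STATE
 *	C_TIMEOUT       -> C_UNCONNECTED  allowed
 *	C_DISCONNECTING -> C_STANDALONE   allowed
 */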

/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * @os:		old state.
 * @ns:		new state.
 *
 * This limits hard state transitions. Hard state transitions are facts
 * that are imposed on DRBD by the environment, e.g. the disk broke or the
 * network broke down. But even those hard state transitions are not
 * allowed to do everything.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING) {
		printk("TODO: FIX ME\n");
		rv = SS_IS_DISKLESS;
	}

	return rv;
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @warn_sync_abort: set to the reason string if an ongoing resync or
 *		online verify has to be aborted.
 *
 * When we lose connection, we have to set the state of the peer's disk
 * (pdsk) to D_UNKNOWN. This rule and many more along those lines are in
 * this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state ns,
				       const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		dev_warn(DEV, "Implicitly set disk from %s to %s\n",
			 drbd_disk_str(ns.disk), drbd_disk_str(disk_min));
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		dev_warn(DEV, "Implicitly set pdsk from %s to %s\n",
			 drbd_disk_str(ns.pdsk), drbd_disk_str(pdsk_min));
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no up-to-date data is accessible */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}

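/*
 * Example (illustrative): dropping the connection with
 *
 *	ns = { .conn = C_NETWORK_FAILURE, .disk = D_UP_TO_DATE,
 *	       .pdsk = D_UP_TO_DATE, ... }
 *
 * comes back from sanitize_state() with ns.peer == R_UNKNOWN,
 * ns.peer_isp == 0 and ns.pdsk == D_UNKNOWN, since nothing is known
 * about the peer once the link is gone.  A pdsk of D_INCONSISTENT or
 * D_OUTDATED, in contrast, is remembered across the disconnect.
 */
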
void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}

/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (mdev->tconn->agreed_pro_version < 90)
		mdev->ov_start_sector = 0;
	mdev->rs_total = drbd_bm_bits(mdev);
	mdev->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total) {
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
			mdev->rs_total = 1;
		} else
			mdev->rs_total -= bit;
		mdev->ov_position = mdev->ov_start_sector;
	}
	mdev->ov_left = mdev->rs_total;
}

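/*
 * Worked example (illustrative, assuming the usual 4 KiB per bitmap
 * bit, i.e. BM_SECT_TO_BIT(s) == s >> 3): starting C_VERIFY_S at
 * ov_start_sector == 800 on a bitmap of drbd_bm_bits(mdev) == 1000
 * bits gives
 *
 *	bit = 800 >> 3 = 100
 *	rs_total = 1000 - 100 = 900 = ov_left
 *	ov_position = ov_start_sector = 800
 *
 * while a start sector beyond the end of the device is clamped to the
 * last bit, leaving a minimal rs_total of 1.
 */
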
/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion that will be completed after after_state_ch() has finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
enum drbd_state_rv
__drbd_set_state(struct drbd_conf *mdev, union drbd_state ns,
		 enum chg_state_flags flags, struct completion *done)
{
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, ns, &warn_sync_abort);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/* pre-state-change checks; only look at ns */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen... */

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_soft_transition(os, ns);
		} else
			rv = is_valid_soft_transition(os, ns);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	{
	char *pbp, pb[300];
	pbp = pb;
	*pbp = 0;
	if (ns.role != os.role)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));
	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);
	dev_info(DEV, "%s\n", pb);
	}

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition to D_DISKLESS (and
	 * drbd_ldev_destroy()) won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(mdev, "attached to UUIDs");

	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);
	wake_up(&mdev->tconn->ping_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(mdev) - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(mdev, ns.conn);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->ov_left;
			mdev->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(mdev);

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->tconn->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->tconn->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->w.mdev = mdev;
		ascw->done = done;
		drbd_queue_work(&mdev->tconn->data.work, &ascw->w);
	} else {
		dev_warn(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_conf *mdev = w->mdev;

	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 1;
}

static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_conf *mdev,
		int (*io_fn)(struct drbd_conf *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(current == mdev->tconn->worker.task);

	/* open coded non-blocking drbd_suspend_io(mdev); */
	set_bit(SUSPEND_IO, &mdev->flags);

	drbd_bm_lock(mdev, why, flags);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}

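/*
 * Used from after_state_ch() below, e.g. to flush the bitmap when we
 * get demoted:
 *
 *	drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
 *			"demote", BM_LOCKED_TEST_ALLOWED);
 *
 * Unlike drbd_queue_bitmap_io(), this runs io_fn() synchronously, which
 * is why it must be called from the worker itself, see the D_ASSERT
 * above.
 */
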
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	enum drbd_req_event what = NOTHING;
	union drbd_state nsm = (union drbd_state){ .i = -1 };

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_state(mdev, ns);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	nsm.i = -1;
	if (ns.susp_nod) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
			what = RESEND;

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = RESTART_FROZEN_DISK_IO;

		if (what != NOTHING)
			nsm.susp_nod = 0;
	}

	if (ns.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
			tl_clear(mdev);
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			}
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			what = RESEND;
			nsm.susp_fen = 0;
		}
	}

	if (what != NOTHING) {
		spin_lock_irq(&mdev->tconn->req_lock);
		_tl_restart(mdev, what);
		nsm.i &= mdev->state.i;
		_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);
	}

	/* Became sync source.  With protocol >= 96, we still need to send out
	 * the sync uuid now. Need to do that before any drbd_send_state, or
	 * the other side may go "paused sync" before receiving the sync uuids,
	 * which is unexpected. */
	if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
	    mdev->tconn->agreed_pro_version >= 96 && get_ldev(mdev)) {
		drbd_gen_and_send_sync_uuid(mdev);
		put_ldev(mdev);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}
	/* No point in queuing send_bitmap if we don't have a connection
	 * anymore, so check also the _current_ state, not only the new state
	 * at the time this work was queued. */
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S &&
	    mdev->state.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL,
				"send_bitmap (WFBitMapS)",
				BM_LOCKED_TEST_ALLOWED);

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (is_susp(mdev->state)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			/* We may still be Primary ourselves.
			 * No harm done if the bitmap still changes,
			 * redirtied pages will follow later. */
			drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote diskless peer", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	/* Write out all changed bits on demote.
	 * Though there is no need to do that just yet
	 * if a resync is still going on */
	if (os.role == R_PRIMARY && ns.role == R_SECONDARY &&
		mdev->state.conn <= C_CONNECTED && get_ldev(mdev)) {
		/* No changes to the bitmap expected this time, so assert that,
		 * even though no harm was done if it did change. */
		drbd_bitmap_io_from_worker(mdev, &drbd_bm_write,
				"demote", BM_LOCKED_TEST_ALLOWED);
		put_ldev(mdev);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(mdev);

	/* We are in the process of starting a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(mdev,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);

	/* We are invalidating ourselves... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		/* other bitmap operation expected during this phase */
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL,
			"set_n_write from invalidate", BM_LOCKED_MASK);

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;
		int was_io_error;
		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS,
		 * so it is safe to dereference ldev here. */
		eh = mdev->ldev->dc.on_io_error;
		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

		/* current state still has to be D_FAILED,
		 * there is only one way out: to D_DISKLESS,
		 * and that may only happen after our put_ldev below. */
		if (mdev->state.disk != D_FAILED)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s during detach\n",
				drbd_disk_str(mdev->state.disk));

		if (drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
		else
			dev_err(DEV, "Sending state for detaching disk failed\n");

		drbd_rs_cancel_all(mdev);

		/* In case we want to get something to stable storage still,
		 * this may be the last chance.
		 * Following put_ldev may transition to D_DISKLESS. */
		drbd_md_sync(mdev);
		put_ldev(mdev);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (mdev->state.disk != D_DISKLESS)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s while going diskless\n",
				drbd_disk_str(mdev->state.disk));

		mdev->rs_total = 0;
		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		if (drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(mdev);
	}

	/* Notify peer that I had a local IO error and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT)
		drbd_send_state(mdev);

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync.  Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev);

	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or because of connection loss.
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 * No harm done if some bits change during this phase.
	 */
	if (os.conn > C_CONNECTED && ns.conn <= C_CONNECTED && get_ldev(mdev)) {
		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL,
			"write from resync_finished", BM_LOCKED_SET_ALLOWED);
		put_ldev(mdev);
	}

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
	}

	after_conn_state_ch(mdev->tconn, os, ns, flags);
	drbd_md_sync(mdev);
}

static void after_conn_state_ch(struct drbd_tconn *tconn, union drbd_state os,
				union drbd_state ns, enum chg_state_flags flags)
{
	/* Upon network configuration, we need to start the receiver */
	if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
		drbd_thread_start(&tconn->receiver);

	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		/* if (test_bit(DEVICE_DYING, &mdev->flags)) TODO: DEVICE_DYING functionality */
		drbd_thread_stop_nowait(&tconn->worker);
	}
}