// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic SCSI-3 ALUA SCSI Device Handler
 *
 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
 * All rights reserved.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>

#define ALUA_DH_NAME "alua"
#define ALUA_DH_VER "2.0"

#define TPGS_SUPPORT_NONE		0x00
#define TPGS_SUPPORT_OPTIMIZED		0x01
#define TPGS_SUPPORT_NONOPTIMIZED	0x02
#define TPGS_SUPPORT_STANDBY		0x04
#define TPGS_SUPPORT_UNAVAILABLE	0x08
#define TPGS_SUPPORT_LBA_DEPENDENT	0x10
#define TPGS_SUPPORT_OFFLINE		0x40
#define TPGS_SUPPORT_TRANSITION		0x80
#define TPGS_SUPPORT_ALL		0xdf

#define RTPG_FMT_MASK			0x70
#define RTPG_FMT_EXT_HDR		0x10

#define TPGS_MODE_UNINITIALIZED		-1
#define TPGS_MODE_NONE			0x0
#define TPGS_MODE_IMPLICIT		0x1
#define TPGS_MODE_EXPLICIT		0x2

#define ALUA_RTPG_SIZE			128
#define ALUA_FAILOVER_TIMEOUT		60
#define ALUA_FAILOVER_RETRIES		5
#define ALUA_RTPG_DELAY_MSECS		5

/* device handler flags */
#define ALUA_OPTIMIZE_STPG		0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP	0x02
/* State machine flags */
#define ALUA_PG_RUN_RTPG		0x10
#define ALUA_PG_RUN_STPG		0x20
#define ALUA_PG_RUNNING			0x40
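/*
 * The RUN_* flags record work that alua_rtpg_work() still has to do for
 * this port group, while ALUA_PG_RUNNING is set for the duration of the
 * worker itself so that concurrent requests only set the RUN_* bits
 * instead of queueing a second work item.
 */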

static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");

static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;

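/*
 * An alua_port_group is shared by all devices that report the same
 * device identification (VPD page 0x83) and target port group id; it
 * lives on the global port_group_list and is freed via its kref.
 */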
struct alua_port_group {
	struct kref		kref;
	struct rcu_head		rcu;
	struct list_head	node;
	struct list_head	dh_list;
	unsigned char		device_id_str[256];
	int			device_id_len;
	int			group_id;
	int			tpgs;
	int			state;
	int			pref;
	int			valid_states;
	unsigned		flags; /* used for optimizing STPG */
	unsigned char		transition_tmo;
	unsigned long		expiry;
	unsigned long		interval;
	struct delayed_work	rtpg_work;
	spinlock_t		lock;
	struct list_head	rtpg_list;
	struct scsi_device	*rtpg_sdev;
};

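/*
 * Per-device handler data: points to the shared port group through an
 * RCU-protected pointer and is linked into that group's dh_list.
 */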
struct alua_dh_data {
	struct list_head	node;
	struct alua_port_group __rcu *pg;
	int			group_id;
	spinlock_t		pg_lock;
	struct scsi_device	*sdev;
	int			init_error;
	struct mutex		init_mutex;
};

struct alua_queue_data {
	struct list_head	entry;
	activate_complete	callback_fn;
	void			*callback_data;
};

#define ALUA_POLICY_SWITCH_CURRENT	0
#define ALUA_POLICY_SWITCH_ALL		1

static void alua_rtpg_work(struct work_struct *work);
static bool alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);

static void release_port_group(struct kref *kref)
{
	struct alua_port_group *pg;

	pg = container_of(kref, struct alua_port_group, kref);
	if (pg->rtpg_sdev)
		flush_delayed_work(&pg->rtpg_work);
	spin_lock(&port_group_lock);
	list_del(&pg->node);
	spin_unlock(&port_group_lock);
	kfree_rcu(pg, rcu);
}

/*
 * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
 * @sdev: sdev the command should be sent to
 */
static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
		       int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
	u8 cdb[MAX_COMMAND_SIZE];
	int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
		REQ_FAILFAST_DRIVER;

	/* Prepare the command. */
	memset(cdb, 0x0, MAX_COMMAND_SIZE);
	cdb[0] = MAINTENANCE_IN;
	if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
		cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
	else
		cdb[1] = MI_REPORT_TARGET_PGS;
	put_unaligned_be32(bufflen, &cdb[6]);

	return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buff, bufflen, NULL,
			sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
			ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
}

/*
 * submit_stpg - Issue a SET TARGET PORT GROUP command
 *
 * Currently we're only setting the current target port group state
 * to 'active/optimized' and letting the array firmware figure out
 * the states of the remaining groups.
 */
static int submit_stpg(struct scsi_device *sdev, int group_id,
		       struct scsi_sense_hdr *sshdr)
{
	u8 cdb[MAX_COMMAND_SIZE];
	unsigned char stpg_data[8];
	int stpg_len = 8;
	int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
		REQ_FAILFAST_DRIVER;

	/* Prepare the data buffer */
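	/*
	 * The 8-byte parameter list is a 4-byte header followed by a single
	 * descriptor: the desired access state goes into byte 4 and the
	 * target port group id into bytes 6-7.
	 */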
	memset(stpg_data, 0, stpg_len);
	stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
	put_unaligned_be16(group_id, &stpg_data[6]);

	/* Prepare the command. */
	memset(cdb, 0x0, MAX_COMMAND_SIZE);
	cdb[0] = MAINTENANCE_OUT;
	cdb[1] = MO_SET_TARGET_PGS;
	put_unaligned_be32(stpg_len, &cdb[6]);

	return scsi_execute(sdev, cdb, DMA_TO_DEVICE, stpg_data, stpg_len, NULL,
			sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
			ALUA_FAILOVER_RETRIES, req_flags, 0, NULL);
}

static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
						int group_id)
{
	struct alua_port_group *pg;

	if (!id_str || !id_size || !strlen(id_str))
		return NULL;

	list_for_each_entry(pg, &port_group_list, node) {
		if (pg->group_id != group_id)
			continue;
		if (!pg->device_id_len || pg->device_id_len != id_size)
			continue;
		if (strncmp(pg->device_id_str, id_str, id_size))
			continue;
		if (!kref_get_unless_zero(&pg->kref))
			continue;
		return pg;
	}

	return NULL;
}

/*
 * alua_alloc_pg - Allocate a new port_group structure
 * @sdev: scsi device
 * @group_id: port group id
 * @tpgs: target port group settings
 *
 * Allocate a new port_group structure for a given
 * device.
 */
static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
					     int group_id, int tpgs)
{
	struct alua_port_group *pg, *tmp_pg;

	pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
					    sizeof(pg->device_id_str));
	if (pg->device_id_len <= 0) {
		/*
		 * TPGS supported but no device identification found.
		 * Generate private device identification.
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No device descriptors found\n",
			    ALUA_DH_NAME);
		pg->device_id_str[0] = '\0';
		pg->device_id_len = 0;
	}
	pg->group_id = group_id;
	pg->tpgs = tpgs;
	pg->state = SCSI_ACCESS_STATE_OPTIMAL;
	pg->valid_states = TPGS_SUPPORT_ALL;
	if (optimize_stpg)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	kref_init(&pg->kref);
	INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
	INIT_LIST_HEAD(&pg->rtpg_list);
	INIT_LIST_HEAD(&pg->node);
	INIT_LIST_HEAD(&pg->dh_list);
	spin_lock_init(&pg->lock);

	spin_lock(&port_group_lock);
	tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
				  group_id);
	if (tmp_pg) {
		spin_unlock(&port_group_lock);
		kfree(pg);
		return tmp_pg;
	}

	list_add(&pg->node, &port_group_list);
	spin_unlock(&port_group_lock);

	return pg;
}

/*
 * alua_check_tpgs - Evaluate TPGS setting
 * @sdev: device to be checked
 *
 * Examine the TPGS setting of the sdev to find out if ALUA
 * is supported.
 */
static int alua_check_tpgs(struct scsi_device *sdev)
{
	int tpgs = TPGS_MODE_NONE;

	/*
	 * ALUA support for non-disk devices is fraught with
	 * difficulties, so disable it for now.
	 */
	if (sdev->type != TYPE_DISK) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: disable for non-disk devices\n",
			    ALUA_DH_NAME);
		return tpgs;
	}

	tpgs = scsi_device_tpgs(sdev);
	switch (tpgs) {
	case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev,
			    "%s: supports implicit and explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_EXPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_NONE:
		sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
			    ALUA_DH_NAME);
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: unsupported TPGS setting %d\n",
			    ALUA_DH_NAME, tpgs);
		tpgs = TPGS_MODE_NONE;
		break;
	}

	return tpgs;
}

/*
 * alua_check_vpd - Evaluate INQUIRY vpd page 0x83
 * @sdev: device to be checked
 *
 * Extract the relative target port and the target port group
 * descriptor from the list of identifiers.
 */
static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
			  int tpgs)
{
	int rel_port = -1, group_id;
	struct alua_port_group *pg, *old_pg = NULL;
	bool pg_updated = false;
	unsigned long flags;

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id < 0) {
		/*
		 * Internal error; TPGS supported but required
		 * VPD identification descriptors not present.
		 * Disable ALUA support
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No target port descriptors found\n",
			    ALUA_DH_NAME);
		return SCSI_DH_DEV_UNSUPP;
	}

	pg = alua_alloc_pg(sdev, group_id, tpgs);
	if (IS_ERR(pg)) {
		if (PTR_ERR(pg) == -ENOMEM)
			return SCSI_DH_NOMEM;
		return SCSI_DH_DEV_UNSUPP;
	}
	if (pg->device_id_len)
		sdev_printk(KERN_INFO, sdev,
			    "%s: device %s port group %x rel port %x\n",
			    ALUA_DH_NAME, pg->device_id_str,
			    group_id, rel_port);
	else
		sdev_printk(KERN_INFO, sdev,
			    "%s: port group %x rel port %x\n",
			    ALUA_DH_NAME, group_id, rel_port);

	/* Check for existing port group references */
	spin_lock(&h->pg_lock);
	old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	if (old_pg != pg) {
		/* port group has changed. Update to new port group */
		if (h->pg) {
			spin_lock_irqsave(&old_pg->lock, flags);
			list_del_rcu(&h->node);
			spin_unlock_irqrestore(&old_pg->lock, flags);
		}
		rcu_assign_pointer(h->pg, pg);
		pg_updated = true;
	}

	spin_lock_irqsave(&pg->lock, flags);
	if (pg_updated)
		list_add_rcu(&h->node, &pg->dh_list);
	spin_unlock_irqrestore(&pg->lock, flags);

	alua_rtpg_queue(rcu_dereference_protected(h->pg,
						  lockdep_is_held(&h->pg_lock)),
			sdev, NULL, true);
	spin_unlock(&h->pg_lock);

	if (old_pg)
		kref_put(&old_pg->kref, release_port_group);

	return SCSI_DH_OK;
}

static char print_alua_state(unsigned char state)
{
	switch (state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return 'A';
	case SCSI_ACCESS_STATE_ACTIVE:
		return 'N';
	case SCSI_ACCESS_STATE_STANDBY:
		return 'S';
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		return 'U';
	case SCSI_ACCESS_STATE_LBA:
		return 'L';
	case SCSI_ACCESS_STATE_OFFLINE:
		return 'O';
	case SCSI_ACCESS_STATE_TRANSITIONING:
		return 'T';
	default:
		return 'X';
	}
}

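/*
 * alua_check_sense - Map ALUA-related sense data to a retry disposition
 * @sdev: device that reported the sense data
 * @sense_hdr: decoded sense data
 *
 * Translate the ALUA-related ASC/ASCQ combinations into a return value
 * for the SCSI midlayer and, where a state change may have occurred,
 * schedule a re-evaluation of the port group via alua_check().
 */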
static int alua_check_sense(struct scsi_device *sdev,
			    struct scsi_sense_hdr *sense_hdr)
{
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			alua_check(sdev, false);
			return NEEDS_RETRY;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
			/*
			 * Power On, Reset, or Bus Device Reset.
			 * Might have obscured a state transition,
			 * so schedule a recheck.
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
			/*
			 * Device internal reset
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
			/*
			 * Mode Parameters Changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
			/*
			 * ALUA state changed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
			/*
			 * Implicit ALUA state transition failed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
			/*
			 * Inquiry data has changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
			/*
			 * REPORTED_LUNS_DATA_HAS_CHANGED is reported
			 * when switching controllers on targets like
			 * Intel Multi-Flex. We can just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}

	return SCSI_RETURN_NOT_HANDLED;
}

/*
 * alua_tur - Send a TEST UNIT READY
 * @sdev: device to which the TEST UNIT READY command should be sent
 *
 * Send a TEST UNIT READY to @sdev to figure out the device state
 * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING,
 * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
 */
static int alua_tur(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sense_hdr;
	int retval;

	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
				      ALUA_FAILOVER_RETRIES, &sense_hdr);
	if (sense_hdr.sense_key == NOT_READY &&
	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
		return SCSI_DH_RETRY;
	else if (retval)
		return SCSI_DH_IO;
	else
		return SCSI_DH_OK;
}

/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	struct scsi_sense_hdr sense_hdr;
	struct alua_port_group *tmp_pg;
	int len, k, off, bufflen = ALUA_RTPG_SIZE;
	unsigned char *desc, *buff;
	unsigned err, retval;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;
	unsigned long flags;

	if (!pg->expiry) {
		unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

		if (pg->transition_tmo)
			transition_tmo = pg->transition_tmo * HZ;

		pg->expiry = round_jiffies_up(jiffies + transition_tmo);
	}

	buff = kzalloc(bufflen, GFP_KERNEL);
	if (!buff)
		return SCSI_DH_DEV_TEMP_BUSY;

 retry:
	err = 0;
	retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);

	if (retval) {
		/*
		 * Some (broken) implementations have a habit of returning
		 * an error during things like firmware update etc.
		 * But if the target only supports active/optimized there's
		 * not much we can do; it's not that we can switch paths
		 * or anything.
		 * So ignore any errors to avoid spurious failures during
		 * path failover.
		 */
		if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: ignoring rtpg result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			return SCSI_DH_OK;
		}
		if (!scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			if (driver_byte(retval) == DRIVER_ERROR)
				return SCSI_DH_DEV_TEMP_BUSY;
			return SCSI_DH_IO;
		}

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 */
		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
			goto retry;
		}
		/*
		 * Retry on ALUA state transition or if any
		 * UNIT ATTENTION occurred.
		 */
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
			err = SCSI_DH_RETRY;
		else if (sense_hdr.sense_key == UNIT_ATTENTION)
			err = SCSI_DH_RETRY;
		if (err == SCSI_DH_RETRY &&
		    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
			kfree(buff);
			return err;
		}
		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		kfree(buff);
		pg->expiry = 0;
		return SCSI_DH_IO;
	}

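	/*
	 * Bytes 0-3 of the returned data hold the length of the data that
	 * follows, so the complete response is that value plus four; if it
	 * does not fit into the current buffer, reallocate and resubmit.
	 */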
	len = get_unaligned_be32(&buff[0]) + 4;

	if (len > bufflen) {
		/* Resubmit with the correct length */
		kfree(buff);
		bufflen = len;
		buff = kmalloc(bufflen, GFP_KERNEL);
		if (!buff) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n", __func__);
			/* Temporary failure, bypass */
			pg->expiry = 0;
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

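	/*
	 * With the extended header format, byte 5 carries the implicit
	 * transition time (in seconds) reported by the array; without it,
	 * fall back to the default failover timeout.
	 */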
	orig_transition_tmo = pg->transition_tmo;
	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
		pg->transition_tmo = buff[5];
	else
		pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (orig_transition_tmo != pg->transition_tmo) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, pg->transition_tmo);
		pg->expiry = jiffies + pg->transition_tmo * HZ;
	}

	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

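	/*
	 * Walk the target port group descriptors: byte 0 holds the PREF bit
	 * and the asymmetric access state, byte 1 the supported-states
	 * bitmap, bytes 2-3 the group id, and each descriptor is 8 bytes
	 * plus 4 bytes per relative target port listed in byte 7.
	 */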
	for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, desc += off) {
		u16 group_id = get_unaligned_be16(&desc[2]);

		spin_lock_irqsave(&port_group_lock, flags);
		tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
					  group_id);
		spin_unlock_irqrestore(&port_group_lock, flags);
		if (tmp_pg) {
			if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
				if ((tmp_pg == pg) ||
				    !(tmp_pg->flags & ALUA_PG_RUNNING)) {
					struct alua_dh_data *h;

					tmp_pg->state = desc[0] & 0x0f;
					tmp_pg->pref = desc[0] >> 7;
					rcu_read_lock();
					list_for_each_entry_rcu(h,
						&tmp_pg->dh_list, node) {
						/* h->sdev should always be valid */
						BUG_ON(!h->sdev);
						h->sdev->access_state = desc[0];
					}
					rcu_read_unlock();
				}
				if (tmp_pg == pg)
					tmp_pg->valid_states = desc[1];
				spin_unlock_irqrestore(&tmp_pg->lock, flags);
			}
			kref_put(&tmp_pg->kref, release_port_group);
		}
		off = 8 + (desc[7] * 4);
	}

	spin_lock_irqsave(&pg->lock, flags);
	sdev_printk(KERN_INFO, sdev,
		    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
		    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
		    pg->pref ? "preferred" : "non-preferred",
		    pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
		    pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
		    pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
		    pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
		    pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
		    pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
		    pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');

	switch (pg->state) {
	case SCSI_ACCESS_STATE_TRANSITIONING:
		if (time_before(jiffies, pg->expiry)) {
			/* State transition, retry */
			pg->interval = 2;
			err = SCSI_DH_RETRY;
		} else {
			struct alua_dh_data *h;

			/* Transitioning time exceeded, set port to standby */
			err = SCSI_DH_IO;
			pg->state = SCSI_ACCESS_STATE_STANDBY;
			pg->expiry = 0;
			rcu_read_lock();
			list_for_each_entry_rcu(h, &pg->dh_list, node) {
				BUG_ON(!h->sdev);
				h->sdev->access_state =
					(pg->state & SCSI_ACCESS_STATE_MASK);
				if (pg->pref)
					h->sdev->access_state |=
						SCSI_ACCESS_STATE_PREFERRED;
			}
			rcu_read_unlock();
		}
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		/* Path unusable */
		err = SCSI_DH_DEV_OFFLINED;
		pg->expiry = 0;
		break;
	default:
		/* Usable path if active */
		err = SCSI_DH_OK;
		pg->expiry = 0;
		break;
	}
	spin_unlock_irqrestore(&pg->lock, flags);
	kfree(buff);
	return err;
}

/*
 * alua_stpg - Issue a SET TARGET PORT GROUP command
 *
 * Issue a SET TARGET PORT GROUP command and evaluate the
 * response. Returns SCSI_DH_RETRY per default to trigger
 * a re-evaluation of the target group state or SCSI_DH_OK
 * if no further action needs to be taken.
 */
static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	int retval;
	struct scsi_sense_hdr sense_hdr;

	if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
		/* Only implicit ALUA supported, retry */
		return SCSI_DH_RETRY;
	}
	switch (pg->state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return SCSI_DH_OK;
	case SCSI_ACCESS_STATE_ACTIVE:
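		/*
		 * With ALUA_OPTIMIZE_STPG set, leave a non-preferred
		 * active/non-optimized path alone as long as implicit ALUA
		 * is also supported, instead of forcing it to
		 * active/optimized with an STPG.
		 */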
		if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
		    !pg->pref &&
		    (pg->tpgs & TPGS_MODE_IMPLICIT))
			return SCSI_DH_OK;
		break;
	case SCSI_ACCESS_STATE_STANDBY:
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		return SCSI_DH_IO;
	case SCSI_ACCESS_STATE_TRANSITIONING:
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: stpg failed, unhandled TPGS state %d",
			    ALUA_DH_NAME, pg->state);
		return SCSI_DH_NOSYS;
	}
	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);

	if (retval) {
		if (!scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: stpg failed, result %d",
				    ALUA_DH_NAME, retval);
			if (driver_byte(retval) == DRIVER_ERROR)
				return SCSI_DH_DEV_TEMP_BUSY;
		} else {
			sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		}
	}
	/* Retry RTPG */
	return SCSI_DH_RETRY;
}

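/*
 * alua_rtpg_work - Port group worker
 *
 * Runs at most once per port group at a time: issues an RTPG (preceded
 * by a TUR while the group is still transitioning), optionally follows
 * up with an STPG, requeues itself with a pg->interval delay whenever a
 * retry is needed, and finally completes the callbacks queued on
 * pg->rtpg_list.
 */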
static void alua_rtpg_work(struct work_struct *work)
{
	struct alua_port_group *pg =
		container_of(work, struct alua_port_group, rtpg_work.work);
	struct scsi_device *sdev;
	LIST_HEAD(qdata_list);
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&pg->lock, flags);
	sdev = pg->rtpg_sdev;
	if (!sdev) {
		WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
		WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
		spin_unlock_irqrestore(&pg->lock, flags);
		kref_put(&pg->kref, release_port_group);
		return;
	}
	pg->flags |= ALUA_PG_RUNNING;
	if (pg->flags & ALUA_PG_RUN_RTPG) {
		int state = pg->state;

		pg->flags &= ~ALUA_PG_RUN_RTPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
			if (alua_tur(sdev) == SCSI_DH_RETRY) {
				spin_lock_irqsave(&pg->lock, flags);
				pg->flags &= ~ALUA_PG_RUNNING;
				pg->flags |= ALUA_PG_RUN_RTPG;
				spin_unlock_irqrestore(&pg->lock, flags);
				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
						   pg->interval * HZ);
				return;
			}
			/* Send RTPG on failure or if TUR indicates SUCCESS */
		}
		err = alua_rtpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags &= ~ALUA_PG_RUNNING;
			pg->flags |= ALUA_PG_RUN_RTPG;
			spin_unlock_irqrestore(&pg->lock, flags);
			queue_delayed_work(kaluad_wq, &pg->rtpg_work,
					   pg->interval * HZ);
			return;
		}
		if (err != SCSI_DH_OK)
			pg->flags &= ~ALUA_PG_RUN_STPG;
	}
	if (pg->flags & ALUA_PG_RUN_STPG) {
		pg->flags &= ~ALUA_PG_RUN_STPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		err = alua_stpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags |= ALUA_PG_RUN_RTPG;
			pg->interval = 0;
			pg->flags &= ~ALUA_PG_RUNNING;
			spin_unlock_irqrestore(&pg->lock, flags);
			queue_delayed_work(kaluad_wq, &pg->rtpg_work,
					   pg->interval * HZ);
			return;
		}
	}

	list_splice_init(&pg->rtpg_list, &qdata_list);
	pg->rtpg_sdev = NULL;
	spin_unlock_irqrestore(&pg->lock, flags);

	list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
		list_del(&qdata->entry);
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	spin_lock_irqsave(&pg->lock, flags);
	pg->flags &= ~ALUA_PG_RUNNING;
	spin_unlock_irqrestore(&pg->lock, flags);
	scsi_device_put(sdev);
	kref_put(&pg->kref, release_port_group);
}

/**
 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
 * @pg: ALUA port group associated with @sdev.
 * @sdev: SCSI device for which to submit an RTPG.
 * @qdata: Information about the callback to invoke after the RTPG.
 * @force: Whether or not to submit an RTPG if a work item that will submit an
 *         RTPG already has been scheduled.
 *
 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
 * That function is responsible for calling @qdata->fn().
 */
static bool alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force)
{
	int start_queue = 0;
	unsigned long flags;
	if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
		return false;

	spin_lock_irqsave(&pg->lock, flags);
	if (qdata) {
		list_add_tail(&qdata->entry, &pg->rtpg_list);
		pg->flags |= ALUA_PG_RUN_STPG;
		force = true;
	}
	if (pg->rtpg_sdev == NULL) {
		pg->interval = 0;
		pg->flags |= ALUA_PG_RUN_RTPG;
		kref_get(&pg->kref);
		pg->rtpg_sdev = sdev;
		start_queue = 1;
	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
		pg->flags |= ALUA_PG_RUN_RTPG;
		/* Do not queue if the worker is already running */
		if (!(pg->flags & ALUA_PG_RUNNING)) {
			kref_get(&pg->kref);
			start_queue = 1;
		}
	}

	spin_unlock_irqrestore(&pg->lock, flags);

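	/*
	 * If the work item is queued it takes over the sdev reference
	 * obtained at the start of this function; otherwise that reference
	 * is dropped again below.
	 */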
	if (start_queue) {
		if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
			sdev = NULL;
		else
			kref_put(&pg->kref, release_port_group);
	}
	if (sdev)
		scsi_device_put(sdev);

	return true;
}

/*
 * alua_initialize - Initialize ALUA state
 * @sdev: the device to be initialized
 *
 * For the prep_fn to work correctly we have
 * to initialize the ALUA state for the device.
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err = SCSI_DH_DEV_UNSUPP, tpgs;

	mutex_lock(&h->init_mutex);
	tpgs = alua_check_tpgs(sdev);
	if (tpgs != TPGS_MODE_NONE)
		err = alua_check_vpd(sdev, h, tpgs);
	h->init_error = err;
	mutex_unlock(&h->init_mutex);
	return err;
}
/*
 * alua_set_params - set/unset the optimize flag
 * @sdev: device on the path to be activated
 * params - parameters in the following format
 *	"no_of_params\0param1\0param2\0param3\0...\0"
 * For example, to set the flag pass the following parameters
 * from multipath.conf
 *     hardware_handler	"2 alua 1"
 */
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg = NULL;
	unsigned int optimize = 0, argc;
	const char *p = params;
	int result = SCSI_DH_OK;
	unsigned long flags;

	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
		return -EINVAL;

	while (*p++)
		;
	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
		return -EINVAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg) {
		rcu_read_unlock();
		return -ENXIO;
	}
	spin_lock_irqsave(&pg->lock, flags);
	if (optimize)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	else
		pg->flags &= ~ALUA_OPTIMIZE_STPG;
	spin_unlock_irqrestore(&pg->lock, flags);
	rcu_read_unlock();

	return result;
}

/*
 * alua_activate - activate a path
 * @sdev: device on the path to be activated
 *
 * We're currently switching the port group to be activated only and
 * let the array figure out the rest.
 * There may be other arrays which require us to switch all port groups
 * based on a certain policy. But until we actually encounter them it
 * should be okay.
 */
static int alua_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct alua_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata;
	struct alua_port_group *pg;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata) {
		err = SCSI_DH_RES_TEMP_UNAVAIL;
		goto out;
	}
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	mutex_lock(&h->init_mutex);
	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		kfree(qdata);
		err = h->init_error;
		mutex_unlock(&h->init_mutex);
		goto out;
	}
	rcu_read_unlock();
	mutex_unlock(&h->init_mutex);

	if (alua_rtpg_queue(pg, sdev, qdata, true))
		fn = NULL;
	else
		err = SCSI_DH_DEV_OFFLINED;
	kref_put(&pg->kref, release_port_group);
out:
	if (fn)
		fn(data, err);
	return 0;
}

/*
 * alua_check - check path status
 * @sdev: device on the path to be checked
 *
 * Check the device status
 */
static void alua_check(struct scsi_device *sdev, bool force)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	alua_rtpg_queue(pg, sdev, NULL, force);
	kref_put(&pg->kref, release_port_group);
}

/*
 * alua_prep_fn - request callback
 *
 * Fail I/O to all paths not in state
 * active/optimized or active/non-optimized.
 */
static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;
	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		state = pg->state;
	rcu_read_unlock();

	switch (state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
	case SCSI_ACCESS_STATE_ACTIVE:
	case SCSI_ACCESS_STATE_LBA:
		return BLK_STS_OK;
	case SCSI_ACCESS_STATE_TRANSITIONING:
		return BLK_STS_RESOURCE;
	default:
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}
}

static void alua_rescan(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;

	alua_initialize(sdev, h);
}

/*
 * alua_bus_attach - Attach device handler
 * @sdev: device to be attached to
 */
static int alua_bus_attach(struct scsi_device *sdev)
{
	struct alua_dh_data *h;
	int err;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return SCSI_DH_NOMEM;
	spin_lock_init(&h->pg_lock);
	rcu_assign_pointer(h->pg, NULL);
	h->init_error = SCSI_DH_OK;
	h->sdev = sdev;
	INIT_LIST_HEAD(&h->node);

	mutex_init(&h->init_mutex);
	err = alua_initialize(sdev, h);
	if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
		goto failed;

	sdev->handler_data = h;
	return SCSI_DH_OK;
failed:
	kfree(h);
	return err;
}

/*
 * alua_bus_detach - Detach device handler
 * @sdev: device to be detached from
 */
static void alua_bus_detach(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	spin_lock(&h->pg_lock);
	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	rcu_assign_pointer(h->pg, NULL);
	h->sdev = NULL;
	spin_unlock(&h->pg_lock);
	if (pg) {
		spin_lock_irq(&pg->lock);
		list_del_rcu(&h->node);
		spin_unlock_irq(&pg->lock);
		kref_put(&pg->kref, release_port_group);
	}
	sdev->handler_data = NULL;
	kfree(h);
}

static struct scsi_device_handler alua_dh = {
	.name = ALUA_DH_NAME,
	.module = THIS_MODULE,
	.attach = alua_bus_attach,
	.detach = alua_bus_detach,
	.prep_fn = alua_prep_fn,
	.check_sense = alua_check_sense,
	.activate = alua_activate,
	.rescan = alua_rescan,
	.set_params = alua_set_params,
};

static int __init alua_init(void)
{
	int r;

	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
	if (!kaluad_wq)
		return -ENOMEM;

	r = scsi_register_device_handler(&alua_dh);
	if (r != 0) {
		printk(KERN_ERR "%s: Failed to register scsi device handler",
			ALUA_DH_NAME);
		destroy_workqueue(kaluad_wq);
	}
	return r;
}

static void __exit alua_exit(void)
{
	scsi_unregister_device_handler(&alua_dh);
	destroy_workqueue(kaluad_wq);
}

module_init(alua_init);
module_exit(alua_exit);

MODULE_DESCRIPTION("DM Multipath ALUA support");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ALUA_DH_VER);