/*
 * Generic SCSI-3 ALUA SCSI Device Handler
 *
 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>

#define ALUA_DH_NAME "alua"
#define ALUA_DH_VER "2.0"

#define TPGS_SUPPORT_NONE		0x00
#define TPGS_SUPPORT_OPTIMIZED		0x01
#define TPGS_SUPPORT_NONOPTIMIZED	0x02
#define TPGS_SUPPORT_STANDBY		0x04
#define TPGS_SUPPORT_UNAVAILABLE	0x08
#define TPGS_SUPPORT_LBA_DEPENDENT	0x10
#define TPGS_SUPPORT_OFFLINE		0x40
#define TPGS_SUPPORT_TRANSITION		0x80

#define RTPG_FMT_MASK			0x70
#define RTPG_FMT_EXT_HDR		0x10

#define TPGS_MODE_UNINITIALIZED		-1
#define TPGS_MODE_NONE			0x0
#define TPGS_MODE_IMPLICIT		0x1
#define TPGS_MODE_EXPLICIT		0x2

#define ALUA_RTPG_SIZE			128
#define ALUA_FAILOVER_TIMEOUT		60
#define ALUA_FAILOVER_RETRIES		5
#define ALUA_RTPG_DELAY_MSECS		5

/* device handler flags */
#define ALUA_OPTIMIZE_STPG		0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP	0x02
#define ALUA_SYNC_STPG			0x04
/* State machine flags */
#define ALUA_PG_RUN_RTPG		0x10
#define ALUA_PG_RUN_STPG		0x20
#define ALUA_PG_RUNNING			0x40

static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");

static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;
static struct workqueue_struct *kaluad_sync_wq;

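/*
 * One alua_port_group is allocated per target port group (matched by
 * device id and group id) and shared by all scsi_devices that report
 * membership in it. State and the RTPG/STPG state machine flags are
 * protected by ->lock; the structure is freed via RCU once the last
 * reference is dropped.
 */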
struct alua_port_group {
	struct kref		kref;
	struct rcu_head		rcu;
	struct list_head	node;
	struct list_head	dh_list;
	unsigned char		device_id_str[256];
	int			device_id_len;
	int			group_id;
	int			tpgs;
	int			state;
	int			pref;
	unsigned		flags; /* used for optimizing STPG */
	unsigned char		transition_tmo;
	unsigned long		expiry;
	unsigned long		interval;
	struct delayed_work	rtpg_work;
	spinlock_t		lock;
	struct list_head	rtpg_list;
	struct scsi_device	*rtpg_sdev;
};

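/*
 * Per-device handler data: links a scsi_device to its current port group.
 * ->pg is RCU-protected and updated under ->pg_lock.
 */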
struct alua_dh_data {
	struct list_head	node;
	struct alua_port_group	*pg;
	int			group_id;
	spinlock_t		pg_lock;
	struct scsi_device	*sdev;
	int			init_error;
	struct mutex		init_mutex;
};

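/*
 * Queued activation request; the callback is invoked from alua_rtpg_work()
 * once the RTPG/STPG sequence for the port group has completed.
 */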
struct alua_queue_data {
	struct list_head	entry;
	activate_complete	callback_fn;
	void			*callback_data;
};

#define ALUA_POLICY_SWITCH_CURRENT	0
#define ALUA_POLICY_SWITCH_ALL		1

static void alua_rtpg_work(struct work_struct *work);
static void alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);

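/*
 * release_port_group - Final kref release for a port group
 *
 * Flush any pending RTPG work, unlink the group from the global list
 * and free it after an RCU grace period.
 */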
static void release_port_group(struct kref *kref)
{
	struct alua_port_group *pg;

	pg = container_of(kref, struct alua_port_group, kref);
	if (pg->rtpg_sdev)
		flush_delayed_work(&pg->rtpg_work);
	spin_lock(&port_group_lock);
	list_del(&pg->node);
	spin_unlock(&port_group_lock);
	kfree_rcu(pg, rcu);
}

/*
 * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
 * @sdev: sdev the command should be sent to
 */
static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
		       int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
	u8 cdb[COMMAND_SIZE(MAINTENANCE_IN)];
	int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
		REQ_FAILFAST_DRIVER;

	/* Prepare the command. */
	memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_IN));
	cdb[0] = MAINTENANCE_IN;
	if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
		cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
	else
		cdb[1] = MI_REPORT_TARGET_PGS;
	put_unaligned_be32(bufflen, &cdb[6]);

	return scsi_execute_req_flags(sdev, cdb, DMA_FROM_DEVICE,
				      buff, bufflen, sshdr,
				      ALUA_FAILOVER_TIMEOUT * HZ,
				      ALUA_FAILOVER_RETRIES, NULL, req_flags);
}

/*
 * submit_stpg - Issue a SET TARGET PORT GROUP command
 *
 * Currently we only set the current target port group state
 * to 'active/optimized' and let the array firmware figure out
 * the states of the remaining groups.
 */
static int submit_stpg(struct scsi_device *sdev, int group_id,
		       struct scsi_sense_hdr *sshdr)
{
	u8 cdb[COMMAND_SIZE(MAINTENANCE_OUT)];
	unsigned char stpg_data[8];
	int stpg_len = 8;
	int req_flags = REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
		REQ_FAILFAST_DRIVER;

	/* Prepare the data buffer */
	memset(stpg_data, 0, stpg_len);
	stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
	put_unaligned_be16(group_id, &stpg_data[6]);

	/* Prepare the command. */
	memset(cdb, 0x0, COMMAND_SIZE(MAINTENANCE_OUT));
	cdb[0] = MAINTENANCE_OUT;
	cdb[1] = MO_SET_TARGET_PGS;
	put_unaligned_be32(stpg_len, &cdb[6]);

	return scsi_execute_req_flags(sdev, cdb, DMA_TO_DEVICE,
				      stpg_data, stpg_len,
				      sshdr, ALUA_FAILOVER_TIMEOUT * HZ,
				      ALUA_FAILOVER_RETRIES, NULL, req_flags);
}

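/*
 * alua_find_get_pg - Look up a port group by device id and group id
 * @id_str: device identification string
 * @id_size: size of @id_str
 * @group_id: target port group id
 *
 * Returns the matching port group with an elevated reference count,
 * or NULL if none is found. Must be called with port_group_lock held.
 */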
struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
					 int group_id)
{
	struct alua_port_group *pg;

	list_for_each_entry(pg, &port_group_list, node) {
		if (pg->group_id != group_id)
			continue;
		if (pg->device_id_len != id_size)
			continue;
		if (strncmp(pg->device_id_str, id_str, id_size))
			continue;
		if (!kref_get_unless_zero(&pg->kref))
			continue;
		return pg;
	}

	return NULL;
}

/*
 * alua_alloc_pg - Allocate a new port_group structure
 * @sdev: scsi device
 * @group_id: port group id
 * @tpgs: target port group settings (TPGS mode)
 *
 * Allocate a new port_group structure for a given
 * device.
 */
struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
				      int group_id, int tpgs)
{
	struct alua_port_group *pg, *tmp_pg;

	pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
					    sizeof(pg->device_id_str));
	if (pg->device_id_len <= 0) {
		/*
		 * Internal error: TPGS supported but no device
		 * identification found. Disable ALUA support.
		 */
		kfree(pg);
		sdev_printk(KERN_INFO, sdev,
			    "%s: No device descriptors found\n",
			    ALUA_DH_NAME);
		return ERR_PTR(-ENXIO);
	}
	pg->group_id = group_id;
	pg->tpgs = tpgs;
	pg->state = SCSI_ACCESS_STATE_OPTIMAL;
	if (optimize_stpg)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	kref_init(&pg->kref);
	INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
	INIT_LIST_HEAD(&pg->rtpg_list);
	INIT_LIST_HEAD(&pg->node);
	INIT_LIST_HEAD(&pg->dh_list);
	spin_lock_init(&pg->lock);

	spin_lock(&port_group_lock);
	tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
				  group_id);
	if (tmp_pg) {
		spin_unlock(&port_group_lock);
		kfree(pg);
		return tmp_pg;
	}

	list_add(&pg->node, &port_group_list);
	spin_unlock(&port_group_lock);

	return pg;
}

/*
 * alua_check_tpgs - Evaluate TPGS setting
 * @sdev: device to be checked
 *
 * Examine the TPGS setting of the sdev to find out if ALUA
 * is supported.
 */
static int alua_check_tpgs(struct scsi_device *sdev)
{
	int tpgs = TPGS_MODE_NONE;

	/*
	 * ALUA support for non-disk devices is fraught with
	 * difficulties, so disable it for now.
	 */
	if (sdev->type != TYPE_DISK) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: disable for non-disk devices\n",
			    ALUA_DH_NAME);
		return tpgs;
	}

	tpgs = scsi_device_tpgs(sdev);
	switch (tpgs) {
	case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev,
			    "%s: supports implicit and explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_EXPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_NONE:
		sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
			    ALUA_DH_NAME);
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: unsupported TPGS setting %d\n",
			    ALUA_DH_NAME, tpgs);
		tpgs = TPGS_MODE_NONE;
		break;
	}

	return tpgs;
}

/*
 * alua_check_vpd - Evaluate INQUIRY vpd page 0x83
 * @sdev: device to be checked
 *
 * Extract the relative target port and the target port group
 * descriptor from the list of identifiers.
 */
static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
			  int tpgs)
{
	int rel_port = -1, group_id;
	struct alua_port_group *pg, *old_pg = NULL;
	bool pg_updated = false;
	unsigned long flags;

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id < 0) {
		/*
		 * Internal error; TPGS supported but required
		 * VPD identification descriptors not present.
		 * Disable ALUA support
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No target port descriptors found\n",
			    ALUA_DH_NAME);
		return SCSI_DH_DEV_UNSUPP;
	}

	pg = alua_alloc_pg(sdev, group_id, tpgs);
	if (IS_ERR(pg)) {
		if (PTR_ERR(pg) == -ENOMEM)
			return SCSI_DH_NOMEM;
		return SCSI_DH_DEV_UNSUPP;
	}
	sdev_printk(KERN_INFO, sdev,
		    "%s: device %s port group %x rel port %x\n",
		    ALUA_DH_NAME, pg->device_id_str, group_id, rel_port);

	/* Check for existing port group references */
	spin_lock(&h->pg_lock);
	old_pg = h->pg;
	if (old_pg != pg) {
		/* port group has changed. Update to new port group */
		if (h->pg) {
			spin_lock_irqsave(&old_pg->lock, flags);
			list_del_rcu(&h->node);
			spin_unlock_irqrestore(&old_pg->lock, flags);
		}
		rcu_assign_pointer(h->pg, pg);
		pg_updated = true;
	}

	spin_lock_irqsave(&pg->lock, flags);
	if (sdev->synchronous_alua)
		pg->flags |= ALUA_SYNC_STPG;
	if (pg_updated)
		list_add_rcu(&h->node, &pg->dh_list);
	spin_unlock_irqrestore(&pg->lock, flags);

	alua_rtpg_queue(h->pg, sdev, NULL, true);
	spin_unlock(&h->pg_lock);

	if (old_pg)
		kref_put(&old_pg->kref, release_port_group);

	return SCSI_DH_OK;
}

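/*
 * print_alua_state - Map an ALUA access state to a single status
 * character for kernel log messages.
 */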
static char print_alua_state(unsigned char state)
{
	switch (state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return 'A';
	case SCSI_ACCESS_STATE_ACTIVE:
		return 'N';
	case SCSI_ACCESS_STATE_STANDBY:
		return 'S';
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		return 'U';
	case SCSI_ACCESS_STATE_LBA:
		return 'L';
	case SCSI_ACCESS_STATE_OFFLINE:
		return 'O';
	case SCSI_ACCESS_STATE_TRANSITIONING:
		return 'T';
	default:
		return 'X';
	}
}

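/*
 * alua_check_sense - Evaluate sense data for ALUA-specific conditions
 *
 * Schedule a port group recheck for sense codes that indicate an ALUA
 * state transition or a possible state change, and tell the midlayer
 * whether the command should be retried.
 */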
static int alua_check_sense(struct scsi_device *sdev,
			    struct scsi_sense_hdr *sense_hdr)
{
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			alua_check(sdev, false);
			return NEEDS_RETRY;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
			/*
			 * Power On, Reset, or Bus Device Reset.
			 * Might have obscured a state transition,
			 * so schedule a recheck.
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
			/*
			 * Device internal reset
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
			/*
			 * Mode Parameters Changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
			/*
			 * ALUA state changed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
			/*
			 * Implicit ALUA state transition failed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
			/*
			 * Inquiry data has changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
			/*
			 * REPORTED_LUNS_DATA_HAS_CHANGED is reported
			 * when switching controllers on targets like
			 * Intel Multi-Flex. We can just retry.
			 */
			return ADD_TO_MLQUEUE;
		break;
	}

	return SCSI_RETURN_NOT_HANDLED;
}

/*
 * alua_tur - Send a TEST UNIT READY
 * @sdev: device to which the TEST UNIT READY command should be sent
 *
 * Send a TEST UNIT READY to @sdev to figure out the device state
 * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING,
 * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
 */
static int alua_tur(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sense_hdr;
	int retval;

	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
				      ALUA_FAILOVER_RETRIES, &sense_hdr);
	if (sense_hdr.sense_key == NOT_READY &&
	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
		return SCSI_DH_RETRY;
	else if (retval)
		return SCSI_DH_IO;
	else
		return SCSI_DH_OK;
}

/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	struct scsi_sense_hdr sense_hdr;
	struct alua_port_group *tmp_pg;
	int len, k, off, valid_states = 0, bufflen = ALUA_RTPG_SIZE;
	unsigned char *desc, *buff;
	unsigned err, retval;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;
	unsigned long flags;

	if (!pg->expiry) {
		unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

		if (pg->transition_tmo)
			transition_tmo = pg->transition_tmo * HZ;

		pg->expiry = round_jiffies_up(jiffies + transition_tmo);
	}

	buff = kzalloc(bufflen, GFP_KERNEL);
	if (!buff)
		return SCSI_DH_DEV_TEMP_BUSY;

 retry:
	err = 0;
	retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);

	if (retval) {
		if (!scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			if (driver_byte(retval) == DRIVER_ERROR)
				return SCSI_DH_DEV_TEMP_BUSY;
			return SCSI_DH_IO;
		}

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 */
		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST &&
		    sense_hdr.asc == 0x24 && sense_hdr.ascq == 0) {
			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
			goto retry;
		}
		/*
		 * Retry on ALUA state transition or if any
		 * UNIT ATTENTION occurred.
		 */
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
			err = SCSI_DH_RETRY;
		else if (sense_hdr.sense_key == UNIT_ATTENTION)
			err = SCSI_DH_RETRY;
		if (err == SCSI_DH_RETRY &&
		    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
			/* Free the buffer; the retry allocates a fresh one */
			kfree(buff);
			return err;
		}
		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		kfree(buff);
		pg->expiry = 0;
		return SCSI_DH_IO;
	}

	len = get_unaligned_be32(&buff[0]) + 4;

	if (len > bufflen) {
		/* Resubmit with the correct length */
		kfree(buff);
		bufflen = len;
		buff = kmalloc(bufflen, GFP_KERNEL);
		if (!buff) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n", __func__);
			/* Temporary failure, bypass */
			pg->expiry = 0;
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	orig_transition_tmo = pg->transition_tmo;
	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
		pg->transition_tmo = buff[5];
	else
		pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (orig_transition_tmo != pg->transition_tmo) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, pg->transition_tmo);
		pg->expiry = jiffies + pg->transition_tmo * HZ;
	}

	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

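	/*
	 * Walk the returned target port group descriptors and update the
	 * state of every port group we are tracking for this device.
	 */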
	for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, desc += off) {
		u16 group_id = get_unaligned_be16(&desc[2]);

		spin_lock_irqsave(&port_group_lock, flags);
		tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
					  group_id);
		spin_unlock_irqrestore(&port_group_lock, flags);
		if (tmp_pg) {
			if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
				if ((tmp_pg == pg) ||
				    !(tmp_pg->flags & ALUA_PG_RUNNING)) {
					struct alua_dh_data *h;

					tmp_pg->state = desc[0] & 0x0f;
					tmp_pg->pref = desc[0] >> 7;
					rcu_read_lock();
					list_for_each_entry_rcu(h,
						&tmp_pg->dh_list, node) {
						/* h->sdev should always be valid */
						BUG_ON(!h->sdev);
						h->sdev->access_state = desc[0];
					}
					rcu_read_unlock();
				}
				if (tmp_pg == pg)
					valid_states = desc[1];
				spin_unlock_irqrestore(&tmp_pg->lock, flags);
			}
			kref_put(&tmp_pg->kref, release_port_group);
		}
		off = 8 + (desc[7] * 4);
	}

	spin_lock_irqsave(&pg->lock, flags);
	sdev_printk(KERN_INFO, sdev,
		    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
		    ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state),
		    pg->pref ? "preferred" : "non-preferred",
		    valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
		    valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
		    valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
		    valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
		    valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
		    valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
		    valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');

	switch (pg->state) {
	case SCSI_ACCESS_STATE_TRANSITIONING:
		if (time_before(jiffies, pg->expiry)) {
			/* State transition, retry */
			pg->interval = 2;
			err = SCSI_DH_RETRY;
		} else {
			struct alua_dh_data *h;

			/* Transitioning time exceeded, set port to standby */
			err = SCSI_DH_IO;
			pg->state = SCSI_ACCESS_STATE_STANDBY;
			pg->expiry = 0;
			rcu_read_lock();
			list_for_each_entry_rcu(h, &pg->dh_list, node) {
				BUG_ON(!h->sdev);
				h->sdev->access_state =
					(pg->state & SCSI_ACCESS_STATE_MASK);
				if (pg->pref)
					h->sdev->access_state |=
						SCSI_ACCESS_STATE_PREFERRED;
			}
			rcu_read_unlock();
		}
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		/* Path unusable */
		err = SCSI_DH_DEV_OFFLINED;
		pg->expiry = 0;
		break;
	default:
		/* Usable path if active */
		err = SCSI_DH_OK;
		pg->expiry = 0;
		break;
	}
	spin_unlock_irqrestore(&pg->lock, flags);
	kfree(buff);
	return err;
}

/*
 * alua_stpg - Issue a SET TARGET PORT GROUP command
 *
 * Issue a SET TARGET PORT GROUP command and evaluate the
 * response. Returns SCSI_DH_RETRY by default to trigger
 * a re-evaluation of the target group state or SCSI_DH_OK
 * if no further action needs to be taken.
 */
static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	int retval;
	struct scsi_sense_hdr sense_hdr;

	if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
		/* Only implicit ALUA supported, retry */
		return SCSI_DH_RETRY;
	}
	switch (pg->state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return SCSI_DH_OK;
	case SCSI_ACCESS_STATE_ACTIVE:
		if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
		    !pg->pref &&
		    (pg->tpgs & TPGS_MODE_IMPLICIT))
			return SCSI_DH_OK;
		break;
	case SCSI_ACCESS_STATE_STANDBY:
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		return SCSI_DH_IO;
	case SCSI_ACCESS_STATE_TRANSITIONING:
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: stpg failed, unhandled TPGS state %d",
			    ALUA_DH_NAME, pg->state);
		return SCSI_DH_NOSYS;
	}
	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);

	if (retval) {
		if (!scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: stpg failed, result %d",
				    ALUA_DH_NAME, retval);
			if (driver_byte(retval) == DRIVER_ERROR)
				return SCSI_DH_DEV_TEMP_BUSY;
		} else {
			sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		}
	}
	/* Retry RTPG */
	return SCSI_DH_RETRY;
}

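/*
 * alua_rtpg_work - Port group state machine
 *
 * Runs the deferred RTPG (and, if requested, STPG) for a port group,
 * requeues itself while a state transition is in progress and completes
 * any queued activation callbacks once the state has settled.
 */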
static void alua_rtpg_work(struct work_struct *work)
{
	struct alua_port_group *pg =
		container_of(work, struct alua_port_group, rtpg_work.work);
	struct scsi_device *sdev;
	LIST_HEAD(qdata_list);
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata, *tmp;
	unsigned long flags;
	struct workqueue_struct *alua_wq = kaluad_wq;

	spin_lock_irqsave(&pg->lock, flags);
	sdev = pg->rtpg_sdev;
	if (!sdev) {
		WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
		WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
		spin_unlock_irqrestore(&pg->lock, flags);
		return;
	}
	if (pg->flags & ALUA_SYNC_STPG)
		alua_wq = kaluad_sync_wq;
	pg->flags |= ALUA_PG_RUNNING;
	if (pg->flags & ALUA_PG_RUN_RTPG) {
		int state = pg->state;

		pg->flags &= ~ALUA_PG_RUN_RTPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
			if (alua_tur(sdev) == SCSI_DH_RETRY) {
				spin_lock_irqsave(&pg->lock, flags);
				pg->flags &= ~ALUA_PG_RUNNING;
				pg->flags |= ALUA_PG_RUN_RTPG;
				spin_unlock_irqrestore(&pg->lock, flags);
				queue_delayed_work(alua_wq, &pg->rtpg_work,
						   pg->interval * HZ);
				return;
			}
			/* Send RTPG on failure or if TUR indicates SUCCESS */
		}
		err = alua_rtpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags &= ~ALUA_PG_RUNNING;
			pg->flags |= ALUA_PG_RUN_RTPG;
			spin_unlock_irqrestore(&pg->lock, flags);
			queue_delayed_work(alua_wq, &pg->rtpg_work,
					   pg->interval * HZ);
			return;
		}
		if (err != SCSI_DH_OK)
			pg->flags &= ~ALUA_PG_RUN_STPG;
	}
	if (pg->flags & ALUA_PG_RUN_STPG) {
		pg->flags &= ~ALUA_PG_RUN_STPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		err = alua_stpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags |= ALUA_PG_RUN_RTPG;
			pg->interval = 0;
			pg->flags &= ~ALUA_PG_RUNNING;
			spin_unlock_irqrestore(&pg->lock, flags);
			queue_delayed_work(alua_wq, &pg->rtpg_work,
					   pg->interval * HZ);
			return;
		}
	}

	list_splice_init(&pg->rtpg_list, &qdata_list);
	pg->rtpg_sdev = NULL;
	spin_unlock_irqrestore(&pg->lock, flags);

	list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
		list_del(&qdata->entry);
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	spin_lock_irqsave(&pg->lock, flags);
	pg->flags &= ~ALUA_PG_RUNNING;
	spin_unlock_irqrestore(&pg->lock, flags);
	scsi_device_put(sdev);
	kref_put(&pg->kref, release_port_group);
}

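/*
 * alua_rtpg_queue - Schedule RTPG processing for a port group
 * @pg: the port group to be evaluated
 * @sdev: the device on whose behalf the RTPG is issued
 * @qdata: optional activation completion data, queued on the group
 * @force: request an RTPG even if a worker is already associated
 *
 * References on @pg and @sdev are taken when new work is queued and
 * dropped again by alua_rtpg_work() or on queueing failure.
 */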
static void alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force)
{
	int start_queue = 0;
	unsigned long flags;
	struct workqueue_struct *alua_wq = kaluad_wq;

	if (!pg)
		return;

	spin_lock_irqsave(&pg->lock, flags);
	if (qdata) {
		list_add_tail(&qdata->entry, &pg->rtpg_list);
		pg->flags |= ALUA_PG_RUN_STPG;
		force = true;
	}
	if (pg->rtpg_sdev == NULL) {
		pg->interval = 0;
		pg->flags |= ALUA_PG_RUN_RTPG;
		kref_get(&pg->kref);
		pg->rtpg_sdev = sdev;
		scsi_device_get(sdev);
		start_queue = 1;
	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
		pg->flags |= ALUA_PG_RUN_RTPG;
		/* Do not queue if the worker is already running */
		if (!(pg->flags & ALUA_PG_RUNNING)) {
			kref_get(&pg->kref);
			start_queue = 1;
		}
	}

	if (pg->flags & ALUA_SYNC_STPG)
		alua_wq = kaluad_sync_wq;
	spin_unlock_irqrestore(&pg->lock, flags);

	if (start_queue &&
	    !queue_delayed_work(alua_wq, &pg->rtpg_work,
				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
		scsi_device_put(sdev);
		kref_put(&pg->kref, release_port_group);
	}
}

/*
 * alua_initialize - Initialize ALUA state
 * @sdev: the device to be initialized
 *
 * For the prep_fn to work correctly we have
 * to initialize the ALUA state for the device.
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err = SCSI_DH_DEV_UNSUPP, tpgs;

	mutex_lock(&h->init_mutex);
	tpgs = alua_check_tpgs(sdev);
	if (tpgs != TPGS_MODE_NONE)
		err = alua_check_vpd(sdev, h, tpgs);
	h->init_error = err;
	mutex_unlock(&h->init_mutex);
	return err;
}

/*
 * alua_set_params - set/unset the optimize flag
 * @sdev: device on the path to be activated
 * @params: parameters in the following format
 *          "no_of_params\0param1\0param2\0param3\0...\0"
 *
 * For example, to set the flag pass the following parameters
 * from multipath.conf
 *     hardware_handler "2 alua 1"
 */
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group __rcu *pg = NULL;
	unsigned int optimize = 0, argc;
	const char *p = params;
	int result = SCSI_DH_OK;
	unsigned long flags;

	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
		return -EINVAL;

	while (*p++)
		;
	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
		return -EINVAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg) {
		rcu_read_unlock();
		return -ENXIO;
	}
	spin_lock_irqsave(&pg->lock, flags);
	if (optimize)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	else
		pg->flags &= ~ALUA_OPTIMIZE_STPG;
	spin_unlock_irqrestore(&pg->lock, flags);
	rcu_read_unlock();

	return result;
}

/*
 * alua_activate - activate a path
 * @sdev: device on the path to be activated
 *
 * We currently only switch the port group to be activated and let
 * the array figure out the rest.
 * There may be other arrays which require us to switch all port groups
 * based on a certain policy. But until we actually encounter them it
 * should be okay.
 */
static int alua_activate(struct scsi_device *sdev,
			 activate_complete fn, void *data)
{
	struct alua_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata;
	struct alua_port_group __rcu *pg;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata) {
		err = SCSI_DH_RES_TEMP_UNAVAIL;
		goto out;
	}
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	mutex_lock(&h->init_mutex);
	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		kfree(qdata);
		err = h->init_error;
		mutex_unlock(&h->init_mutex);
		goto out;
	}
	fn = NULL;
	rcu_read_unlock();
	mutex_unlock(&h->init_mutex);

	alua_rtpg_queue(pg, sdev, qdata, true);
	kref_put(&pg->kref, release_port_group);
out:
	if (fn)
		fn(data, err);
	return 0;
}

/*
 * alua_check - check path status
 * @sdev: device on the path to be checked
 *
 * Check the device status
 */
static void alua_check(struct scsi_device *sdev, bool force)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();

	alua_rtpg_queue(pg, sdev, NULL, force);
	kref_put(&pg->kref, release_port_group);
}

/*
 * alua_prep_fn - request callback
 *
 * Fail I/O to all paths not in state
 * active/optimized or active/non-optimized.
 */
static int alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group __rcu *pg;
	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;
	int ret = BLKPREP_OK;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		state = pg->state;
	rcu_read_unlock();
	if (state == SCSI_ACCESS_STATE_TRANSITIONING)
		ret = BLKPREP_DEFER;
	else if (state != SCSI_ACCESS_STATE_OPTIMAL &&
		 state != SCSI_ACCESS_STATE_ACTIVE &&
		 state != SCSI_ACCESS_STATE_LBA) {
		ret = BLKPREP_KILL;
		req->cmd_flags |= REQ_QUIET;
	}
	return ret;
}

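/*
 * alua_rescan - Re-evaluate the ALUA state of a device
 * @sdev: device to be re-evaluated
 */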
static void alua_rescan(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;

	alua_initialize(sdev, h);
}

/*
 * alua_bus_attach - Attach device handler
 * @sdev: device to be attached to
 */
static int alua_bus_attach(struct scsi_device *sdev)
{
	struct alua_dh_data *h;
	int err, ret = -EINVAL;

	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;
	spin_lock_init(&h->pg_lock);
	rcu_assign_pointer(h->pg, NULL);
	h->init_error = SCSI_DH_OK;
	h->sdev = sdev;
	INIT_LIST_HEAD(&h->node);

	mutex_init(&h->init_mutex);
	err = alua_initialize(sdev, h);
	if (err == SCSI_DH_NOMEM)
		ret = -ENOMEM;
	if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
		goto failed;

	sdev->handler_data = h;
	return 0;
failed:
	kfree(h);
	return ret;
}

/*
 * alua_bus_detach - Detach device handler
 * @sdev: device to be detached from
 */
static void alua_bus_detach(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	spin_lock(&h->pg_lock);
	pg = h->pg;
	rcu_assign_pointer(h->pg, NULL);
	h->sdev = NULL;
	spin_unlock(&h->pg_lock);
	if (pg) {
		spin_lock_irq(&pg->lock);
		list_del_rcu(&h->node);
		spin_unlock_irq(&pg->lock);
		kref_put(&pg->kref, release_port_group);
	}
	sdev->handler_data = NULL;
	kfree(h);
}

static struct scsi_device_handler alua_dh = {
	.name = ALUA_DH_NAME,
	.module = THIS_MODULE,
	.attach = alua_bus_attach,
	.detach = alua_bus_detach,
	.prep_fn = alua_prep_fn,
	.check_sense = alua_check_sense,
	.activate = alua_activate,
	.rescan = alua_rescan,
	.set_params = alua_set_params,
};

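/*
 * Module init: set up the RTPG workqueues and register the handler
 * with the SCSI device handler framework.
 */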
static int __init alua_init(void)
{
	int r;

	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
	if (!kaluad_wq) {
		/* Temporary failure, bypass */
		return SCSI_DH_DEV_TEMP_BUSY;
	}
	kaluad_sync_wq = create_workqueue("kaluad_sync");
	if (!kaluad_sync_wq) {
		destroy_workqueue(kaluad_wq);
		return SCSI_DH_DEV_TEMP_BUSY;
	}
	r = scsi_register_device_handler(&alua_dh);
	if (r != 0) {
		printk(KERN_ERR "%s: Failed to register scsi device handler",
			ALUA_DH_NAME);
		destroy_workqueue(kaluad_sync_wq);
		destroy_workqueue(kaluad_wq);
	}
	return r;
}

static void __exit alua_exit(void)
{
	scsi_unregister_device_handler(&alua_dh);
	destroy_workqueue(kaluad_sync_wq);
	destroy_workqueue(kaluad_wq);
}

module_init(alua_init);
module_exit(alua_exit);

MODULE_DESCRIPTION("DM Multipath ALUA support");
MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ALUA_DH_VER);