/*
 * Linux driver for VMware's para-virtualized SCSI HBA.
 *
 * Copyright (C) 2008-2014, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Jim Gill <jgill@vmware.com>
 *
 */
23 | ||
24 | #include <linux/kernel.h> | |
25 | #include <linux/module.h> | |
26 | #include <linux/interrupt.h> | |
5a0e3ad6 | 27 | #include <linux/slab.h> |
851b1642 AK |
28 | #include <linux/workqueue.h> |
29 | #include <linux/pci.h> | |
30 | ||
31 | #include <scsi/scsi.h> | |
32 | #include <scsi/scsi_host.h> | |
33 | #include <scsi/scsi_cmnd.h> | |
34 | #include <scsi/scsi_device.h> | |
02845560 | 35 | #include <scsi/scsi_tcq.h> |
851b1642 AK |
36 | |
37 | #include "vmw_pvscsi.h" | |
38 | ||
39 | #define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" | |
40 | ||
41 | MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); | |
42 | MODULE_AUTHOR("VMware, Inc."); | |
43 | MODULE_LICENSE("GPL"); | |
44 | MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); | |
45 | ||
46 | #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 | |
47 | #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 | |
02845560 | 48 | #define PVSCSI_DEFAULT_QUEUE_DEPTH 254 |
851b1642 AK |
49 | #define SGL_SIZE PAGE_SIZE |
50 | ||
struct pvscsi_sg_list {
	struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
};

struct pvscsi_ctx {
	/*
	 * The index of the context in cmd_map serves as the context ID for a
	 * 1-to-1 mapping of completions back to requests.
	 */
	struct scsi_cmnd *cmd;
	struct pvscsi_sg_list *sgl;
	struct list_head list;
	dma_addr_t dataPA;
	dma_addr_t sensePA;
	dma_addr_t sglPA;
	struct completion *abort_cmp;
};

struct pvscsi_adapter {
	char *mmioBase;
	u8 rev;
	bool use_msg;
	bool use_req_threshold;

	spinlock_t hw_lock;

	struct workqueue_struct *workqueue;
	struct work_struct work;

	struct PVSCSIRingReqDesc *req_ring;
	unsigned req_pages;
	unsigned req_depth;
	dma_addr_t reqRingPA;

	struct PVSCSIRingCmpDesc *cmp_ring;
	unsigned cmp_pages;
	dma_addr_t cmpRingPA;

	struct PVSCSIRingMsgDesc *msg_ring;
	unsigned msg_pages;
	dma_addr_t msgRingPA;

	struct PVSCSIRingsState *rings_state;
	dma_addr_t ringStatePA;

	struct pci_dev *dev;
	struct Scsi_Host *host;

	struct list_head cmd_pool;
	struct pvscsi_ctx *cmd_map;
};


/* Command line parameters */
static int pvscsi_ring_pages;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg = true;
static bool pvscsi_use_req_threshold = true;

#define PVSCSI_RW (S_IRUSR | S_IWUSR)

module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING)
		 "[up to 16 targets],"
		 __stringify(PVSCSI_SETUP_RINGS_MAX_NUM_PAGES)
		 "[for 16+ targets])");

module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
		 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");

module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
		 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");

module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

module_param_named(use_req_threshold, pvscsi_use_req_threshold,
		   bool, PVSCSI_RW);
MODULE_PARM_DESC(use_req_threshold, "Use driver-based request coalescing if configured - (default=1)");

static const struct pci_device_id pvscsi_pci_tbl[] = {
	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);

static struct device *
pvscsi_dev(const struct pvscsi_adapter *adapter)
{
	return &(adapter->dev->dev);
}

static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx, *end;

	end = &adapter->cmd_map[adapter->req_depth];
	for (ctx = adapter->cmd_map; ctx < end; ctx++)
		if (ctx->cmd == cmd)
			return ctx;

	return NULL;
}

static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx;

	if (list_empty(&adapter->cmd_pool))
		return NULL;

	ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
	ctx->cmd = cmd;
	list_del(&ctx->list);

	return ctx;
}

static void pvscsi_release_context(struct pvscsi_adapter *adapter,
				   struct pvscsi_ctx *ctx)
{
	ctx->cmd = NULL;
	ctx->abort_cmp = NULL;
	list_add(&ctx->list, &adapter->cmd_pool);
}

/*
 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
 * non-zero integer. ctx always points to an entry in the cmd_map array,
 * hence the return value is always >= 1.
 */
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
			      const struct pvscsi_ctx *ctx)
{
	return ctx - adapter->cmd_map + 1;
}

static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
	return &adapter->cmd_map[context - 1];
}

static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
			     u32 offset, u32 val)
{
	writel(val, adapter->mmioBase + offset);
}

static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
	return readl(adapter->mmioBase + offset);
}

static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}

static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
				     u32 val)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
	u32 intr_bits;

	intr_bits = PVSCSI_INTR_CMPL_MASK;
	if (adapter->use_msg)
		intr_bits |= PVSCSI_INTR_MSG_MASK;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}

static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
				  u32 cmd, const void *desc, size_t len)
{
	const u32 *ptr = desc;
	size_t i;

	len /= sizeof(*ptr);
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; i++)
		pvscsi_reg_write(adapter,
				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}

static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
			     const struct pvscsi_ctx *ctx)
{
	struct PVSCSICmdDescAbortCmd cmd = { 0 };

	cmd.target = ctx->cmd->device->id;
	cmd.context = pvscsi_map_context(adapter, ctx);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}

static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}

static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}

static int scsi_is_rw(unsigned char op)
{
	return op == READ_6 || op == WRITE_6 ||
	       op == READ_10 || op == WRITE_10 ||
	       op == READ_12 || op == WRITE_12 ||
	       op == READ_16 || op == WRITE_16;
}

static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
			   unsigned char op)
{
	if (scsi_is_rw(op)) {
		struct PVSCSIRingsState *s = adapter->rings_state;

		if (!adapter->use_req_threshold ||
		    s->reqProdIdx - s->reqConsIdx >= s->reqCallThreshold)
			pvscsi_kick_rw_io(adapter);
	} else {
		pvscsi_process_request_ring(adapter);
	}
}

static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}

static void ll_bus_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
}

static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
{
	struct PVSCSICmdDescResetDevice cmd = { 0 };

	dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);

	cmd.target = target;

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
			      &cmd, sizeof(cmd));
}

static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
			     struct scatterlist *sg, unsigned count)
{
	unsigned i;
	struct PVSCSISGElement *sge;

	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);

	sge = &ctx->sgl->sge[0];
	for (i = 0; i < count; i++, sg++) {
		sge[i].addr = sg_dma_address(sg);
		sge[i].length = sg_dma_len(sg);
		sge[i].flags = 0;
	}
}

/*
 * Map all data buffers for a command into PCI space and
 * set up the scatter/gather list if needed.
 */
static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			      struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
			      struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return 0;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		int segs = scsi_dma_map(cmd);

		if (segs == -ENOMEM) {
			scmd_printk(KERN_ERR, cmd,
				    "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
			return -ENOMEM;
		} else if (segs > 1) {
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			ctx->sglPA = dma_map_single(&adapter->dev->dev,
					ctx->sgl, SGL_SIZE, DMA_TO_DEVICE);
			if (dma_mapping_error(&adapter->dev->dev, ctx->sglPA)) {
				scmd_printk(KERN_ERR, cmd,
					    "vmw_pvscsi: Failed to map ctx sglist for DMA.\n");
				scsi_dma_unmap(cmd);
				ctx->sglPA = 0;
				return -ENOMEM;
			}
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
					     cmd->sc_data_direction);
		if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
			scmd_printk(KERN_ERR, cmd,
				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
			return -ENOMEM;
		}
		e->dataAddr = ctx->dataPA;
	}

	return 0;
}

static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
				 struct pvscsi_ctx *ctx)
{
	struct scsi_cmnd *cmd;
	unsigned bufflen;

	cmd = ctx->cmd;
	bufflen = scsi_bufflen(cmd);

	if (bufflen != 0) {
		unsigned count = scsi_sg_count(cmd);

		if (count != 0) {
			scsi_dma_unmap(cmd);
			if (ctx->sglPA) {
				dma_unmap_single(&adapter->dev->dev, ctx->sglPA,
						 SGL_SIZE, DMA_TO_DEVICE);
				ctx->sglPA = 0;
			}
		} else
			dma_unmap_single(&adapter->dev->dev, ctx->dataPA,
					 bufflen, cmd->sc_data_direction);
	}
	if (cmd->sense_buffer)
		dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
	adapter->rings_state = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
			&adapter->ringStatePA, GFP_KERNEL);
	if (!adapter->rings_state)
		return -ENOMEM;

	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
				 pvscsi_ring_pages);
	adapter->req_depth = adapter->req_pages
			    * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	adapter->req_ring = dma_alloc_coherent(&adapter->dev->dev,
			adapter->req_pages * PAGE_SIZE, &adapter->reqRingPA,
			GFP_KERNEL);
	if (!adapter->req_ring)
		return -ENOMEM;

	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
				 pvscsi_ring_pages);
	adapter->cmp_ring = dma_alloc_coherent(&adapter->dev->dev,
			adapter->cmp_pages * PAGE_SIZE, &adapter->cmpRingPA,
			GFP_KERNEL);
	if (!adapter->cmp_ring)
		return -ENOMEM;

	BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

	if (!adapter->use_msg)
		return 0;

	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
				 pvscsi_msg_ring_pages);
	adapter->msg_ring = dma_alloc_coherent(&adapter->dev->dev,
			adapter->msg_pages * PAGE_SIZE, &adapter->msgRingPA,
			GFP_KERNEL);
	if (!adapter->msg_ring)
		return -ENOMEM;
	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));

	return 0;
}

static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescSetupRings cmd = { 0 };
	dma_addr_t base;
	unsigned i;

	cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
	cmd.reqRingNumPages = adapter->req_pages;
	cmd.cmpRingNumPages = adapter->cmp_pages;

	base = adapter->reqRingPA;
	for (i = 0; i < adapter->req_pages; i++) {
		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	base = adapter->cmpRingPA;
	for (i = 0; i < adapter->cmp_pages; i++) {
		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	memset(adapter->rings_state, 0, PAGE_SIZE);
	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
			      &cmd, sizeof(cmd));

	if (adapter->use_msg) {
		struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };

		cmd_msg.numPages = adapter->msg_pages;

		base = adapter->msgRingPA;
		for (i = 0; i < adapter->msg_pages; i++) {
			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
			base += PAGE_SIZE;
		}
		memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);

		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
				      &cmd_msg, sizeof(cmd_msg));
	}
}

static int pvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (!sdev->tagged_supported)
		qdepth = 1;
	return scsi_change_queue_depth(sdev, qdepth);
}

/*
 * Pull a completion descriptor off and pass the completion back
 * to the SCSI mid layer.
 */
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
				    const struct PVSCSIRingCmpDesc *e)
{
	struct pvscsi_ctx *ctx;
	struct scsi_cmnd *cmd;
	struct completion *abort_cmp;
	u32 btstat = e->hostStatus;
	u32 sdstat = e->scsiStatus;

	ctx = pvscsi_get_context(adapter, e->context);
	cmd = ctx->cmd;
	abort_cmp = ctx->abort_cmp;
	pvscsi_unmap_buffers(adapter, ctx);
	pvscsi_release_context(adapter, ctx);
	if (abort_cmp) {
		/*
		 * The command was requested to be aborted. Just signal that
		 * the request completed and swallow the actual cmd completion
		 * here. The abort handler will post a completion for this
		 * command indicating that it got successfully aborted.
		 */
		complete(abort_cmp);
		return;
	}

	cmd->result = 0;
	if (sdstat != SAM_STAT_GOOD &&
	    (btstat == BTSTAT_SUCCESS ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
		if (sdstat == SAM_STAT_COMMAND_TERMINATED) {
			cmd->result = (DID_RESET << 16);
		} else {
			cmd->result = (DID_OK << 16) | sdstat;
			if (sdstat == SAM_STAT_CHECK_CONDITION &&
			    cmd->sense_buffer)
				cmd->result |= (DRIVER_SENSE << 24);
		}
	} else
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			/* If everything went fine, let's move on. */
			cmd->result = (DID_OK << 16);
			break;

		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			/* Report residual data in underruns. */
			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
			cmd->result = (DID_ERROR << 16);
			break;

		case BTSTAT_SELTIMEO:
			/* Our emulation returns this for non-connected devs. */
			cmd->result = (DID_BAD_TARGET << 16);
			break;

		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_BADMSG:
			cmd->result = (DRIVER_INVALID << 24);
			/* fall through */

		case BTSTAT_HAHARDWARE:
		case BTSTAT_INVPHASE:
		case BTSTAT_HATIMEOUT:
		case BTSTAT_NORESPONSE:
		case BTSTAT_DISCONNECT:
		case BTSTAT_HASOFTWARE:
		case BTSTAT_BUSFREE:
		case BTSTAT_SENSFAILED:
			cmd->result |= (DID_ERROR << 16);
			break;

		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			cmd->result = (DID_RESET << 16);
			break;

		case BTSTAT_ABORTQUEUE:
			cmd->result = (DID_BUS_BUSY << 16);
			break;

		case BTSTAT_SCSIPARITY:
			cmd->result = (DID_PARITY << 16);
			break;

		default:
			cmd->result = (DID_ERROR << 16);
			scmd_printk(KERN_DEBUG, cmd,
				    "Unknown completion status: 0x%x\n",
				    btstat);
		}

	dev_dbg(&cmd->device->sdev_gendev,
		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
		cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

	cmd->scsi_done(cmd);
}

/*
 * Barrier usage: since the PVSCSI device is emulated, there could be cases
 * where we may want to serialize some accesses between the driver and the
 * emulation layer. We use compiler barriers instead of the more expensive
 * memory barriers because PVSCSI is only supported on x86, which has strong
 * memory access ordering.
 */
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
	u32 cmp_entries = s->cmpNumEntriesLog2;

	while (s->cmpConsIdx != s->cmpProdIdx) {
		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
						      MASK(cmp_entries));
		/*
		 * This barrier() ensures that *e is not dereferenced while
		 * the device emulation still writes data into the slot.
		 * Since the device emulation advances s->cmpProdIdx only
		 * after updating the slot, we want to check it first.
		 */
		barrier();
		pvscsi_complete_request(adapter, e);
		/*
		 * This barrier() ensures that the compiler doesn't reorder
		 * the write to s->cmpConsIdx before the read of (*e) inside
		 * pvscsi_complete_request. Otherwise, the device emulation
		 * may overwrite *e before we have had a chance to read it.
		 */
		barrier();
		s->cmpConsIdx++;
	}
}

/*
 * Translate a Linux SCSI request into a request ring entry.
 */
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
	struct PVSCSIRingsState *s;
	struct PVSCSIRingReqDesc *e;
	struct scsi_device *sdev;
	u32 req_entries;

	s = adapter->rings_state;
	sdev = cmd->device;
	req_entries = s->reqNumEntriesLog2;

	/*
	 * If this condition holds, we might have room on the request ring, but
	 * we might not have room on the completion ring for the response.
	 * However, we have already ruled out this possibility - we would not
	 * have successfully allocated a context if it were true, since we only
	 * have one context per request entry. Check for it anyway, since it
	 * would be a serious bug.
	 */
	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
		scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
			    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
			    s->reqProdIdx, s->cmpConsIdx);
		return -1;
	}

	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

	e->bus = sdev->channel;
	e->target = sdev->id;
	memset(e->lun, 0, sizeof(e->lun));
	e->lun[1] = sdev->lun;

	if (cmd->sense_buffer) {
		ctx->sensePA = dma_map_single(&adapter->dev->dev,
				cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
			scmd_printk(KERN_ERR, cmd,
				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
			ctx->sensePA = 0;
			return -ENOMEM;
		}
		e->senseAddr = ctx->sensePA;
		e->senseLen = SCSI_SENSE_BUFFERSIZE;
	} else {
		e->senseLen = 0;
		e->senseAddr = 0;
	}
	e->cdbLen = cmd->cmd_len;
	e->vcpuHint = smp_processor_id();
	memcpy(e->cdb, cmd->cmnd, e->cdbLen);

	e->tag = SIMPLE_QUEUE_TAG;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
	else if (cmd->sc_data_direction == DMA_NONE)
		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
	else
		e->flags = 0;

	if (pvscsi_map_buffers(adapter, ctx, cmd, e) != 0) {
		if (cmd->sense_buffer) {
			dma_unmap_single(&adapter->dev->dev, ctx->sensePA,
					 SCSI_SENSE_BUFFERSIZE,
					 DMA_FROM_DEVICE);
			ctx->sensePA = 0;
		}
		return -ENOMEM;
	}

	e->context = pvscsi_map_context(adapter, ctx);

	barrier();

	s->reqProdIdx++;

	return 0;
}

static int pvscsi_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	spin_lock_irqsave(&adapter->hw_lock, flags);

	ctx = pvscsi_acquire_context(adapter, cmd);
	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
		if (ctx)
			pvscsi_release_context(adapter, ctx);
		spin_unlock_irqrestore(&adapter->hw_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmd->scsi_done = done;

	dev_dbg(&cmd->device->sdev_gendev,
		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	pvscsi_kick_io(adapter, cmd->cmnd[0]);

	return 0;
}

static DEF_SCSI_QCMD(pvscsi_queue)

static int pvscsi_abort(struct scsi_cmnd *cmd)
{
	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;
	int result = SUCCESS;
	DECLARE_COMPLETION_ONSTACK(abort_cmp);
	int done;

	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
		    adapter->host->host_no, cmd);

	spin_lock_irqsave(&adapter->hw_lock, flags);

	/*
	 * Poll the completion ring first - we might be trying to abort
	 * a command that is waiting to be dispatched in the completion ring.
	 */
	pvscsi_process_completion_ring(adapter);

	/*
	 * If there is no context for the command, it either already succeeded
	 * or else was never properly issued. Not our problem.
	 */
	ctx = pvscsi_find_context(adapter, cmd);
	if (!ctx) {
		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
		goto out;
	}

	/*
	 * Mark that the command has been requested to be aborted and issue
	 * the abort.
	 */
	ctx->abort_cmp = &abort_cmp;

	pvscsi_abort_cmd(adapter, ctx);
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	/* Wait for 2 secs for the completion. */
	done = wait_for_completion_timeout(&abort_cmp, msecs_to_jiffies(2000));
	spin_lock_irqsave(&adapter->hw_lock, flags);

	if (!done) {
		/*
		 * Failed to abort the command, unmark the fact that it
		 * was requested to be aborted.
		 */
		ctx->abort_cmp = NULL;
		result = FAILED;
		scmd_printk(KERN_DEBUG, cmd,
			    "Failed to get completion for aborted cmd %p\n",
			    cmd);
		goto out;
	}

	/*
	 * Successfully aborted the command.
	 */
	cmd->result = (DID_ABORT << 16);
	cmd->scsi_done(cmd);

out:
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	return result;
}

/*
 * Abort all outstanding requests. This is only safe to use if the completion
 * ring will never be walked again or the device has been reset, because it
 * destroys the 1-1 mapping between the context field passed to the emulation
 * and our request structure.
 */
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
	unsigned i;

	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
		struct scsi_cmnd *cmd = ctx->cmd;
		if (cmd) {
			scmd_printk(KERN_ERR, cmd,
				    "Forced reset on cmd %p\n", cmd);
			pvscsi_unmap_buffers(adapter, ctx);
			pvscsi_release_context(adapter, ctx);
			cmd->result = (DID_RESET << 16);
			cmd->scsi_done(cmd);
		}
	}
}

static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue, we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so stall new requests until all completions are flushed and
	 * the rings are back in place.
	 */

	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions. Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset. The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	pvscsi_reset_all(adapter);
	adapter->use_msg = use_msg;
	pvscsi_setup_all_rings(adapter);
	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");

	/*
	 * We don't want to queue new requests for this bus after
	 * flushing all pending requests to emulation, since new
	 * requests could then sneak in during this bus reset phase,
	 * so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_bus_reset(adapter);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
		    host->host_no, cmd->device->id);

	/*
	 * We don't want to queue new requests for this device after flushing
	 * all pending requests to emulation, since new requests could then
	 * sneak in during this device reset phase, so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);
	ll_device_reset(adapter, cmd->device->id);
	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static struct scsi_host_template pvscsi_template;

static const char *pvscsi_info(struct Scsi_Host *host)
{
	struct pvscsi_adapter *adapter = shost_priv(host);
	static char buf[256];

	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
		pvscsi_template.cmd_per_lun);

	return buf;
}

static struct scsi_host_template pvscsi_template = {
	.module = THIS_MODULE,
	.name = "VMware PVSCSI Host Adapter",
	.proc_name = "vmw_pvscsi",
	.info = pvscsi_info,
	.queuecommand = pvscsi_queue,
	.this_id = -1,
	.sg_tablesize = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
	.dma_boundary = UINT_MAX,
	.max_sectors = 0xffff,
	.change_queue_depth = pvscsi_change_queue_depth,
	.eh_abort_handler = pvscsi_abort,
	.eh_device_reset_handler = pvscsi_device_reset,
	.eh_bus_reset_handler = pvscsi_bus_reset,
	.eh_host_reset_handler = pvscsi_host_reset,
};

static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
			       const struct PVSCSIRingMsgDesc *e)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct Scsi_Host *host = adapter->host;
	struct scsi_device *sdev;

	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n",
	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

	if (e->type == PVSCSI_MSG_DEV_ADDED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
			scsi_device_put(sdev);
		} else
			scsi_add_device(adapter->host, desc->bus,
					desc->target, desc->lun[1]);

		scsi_host_put(host);
	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else
			printk(KERN_INFO
			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
			       desc->bus, desc->target, desc->lun[1]);

		scsi_host_put(host);
	}
}

static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;

	return s->msgProdIdx != s->msgConsIdx;
}

static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
	u32 msg_entries = s->msgNumEntriesLog2;

	while (pvscsi_msg_pending(adapter)) {
		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
						      MASK(msg_entries));

		barrier();
		pvscsi_process_msg(adapter, e);
		barrier();
		s->msgConsIdx++;
	}
}

static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
	struct pvscsi_adapter *adapter;

	adapter = container_of(data, struct pvscsi_adapter, work);

	pvscsi_process_msg_ring(adapter);
}

static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
	char name[32];

	if (!pvscsi_use_msg)
		return 0;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_MSG_RING);

	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
		return 0;

	snprintf(name, sizeof(name),
		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

	adapter->workqueue = create_singlethread_workqueue(name);
	if (!adapter->workqueue) {
		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
		return 0;
	}
	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

	return 1;
}

static bool pvscsi_setup_req_threshold(struct pvscsi_adapter *adapter,
				       bool enable)
{
	u32 val;

	if (!pvscsi_use_req_threshold)
		return false;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_REQCALLTHRESHOLD);
	val = pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS);
	if (val == -1) {
		printk(KERN_INFO "vmw_pvscsi: device does not support req_threshold\n");
		return false;
	} else {
		struct PVSCSICmdDescSetupReqCall cmd_msg = { 0 };
		cmd_msg.enable = enable;
		printk(KERN_INFO
		       "vmw_pvscsi: %sabling reqCallThreshold\n",
		       enable ? "en" : "dis");
		pvscsi_write_cmd_desc(adapter,
				      PVSCSI_CMD_SETUP_REQCALLTHRESHOLD,
				      &cmd_msg, sizeof(cmd_msg));
		return pvscsi_reg_read(adapter,
				       PVSCSI_REG_OFFSET_COMMAND_STATUS) != 0;
	}
}

static irqreturn_t pvscsi_isr(int irq, void *devp)
{
	struct pvscsi_adapter *adapter = devp;
	unsigned long flags;

	spin_lock_irqsave(&adapter->hw_lock, flags);
	pvscsi_process_completion_ring(adapter);
	if (adapter->use_msg && pvscsi_msg_pending(adapter))
		queue_work(adapter->workqueue, &adapter->work);
	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return IRQ_HANDLED;
}

static irqreturn_t pvscsi_shared_isr(int irq, void *devp)
{
	struct pvscsi_adapter *adapter = devp;
	u32 val = pvscsi_read_intr_status(adapter);

	if (!(val & PVSCSI_INTR_ALL_SUPPORTED))
		return IRQ_NONE;
	pvscsi_write_intr_status(devp, val);
	return pvscsi_isr(irq, devp);
}

static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx = adapter->cmd_map;
	unsigned i;

	for (i = 0; i < adapter->req_depth; ++i, ++ctx)
		free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}

static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
	free_irq(pci_irq_vector(adapter->dev, 0), adapter);
	pci_free_irq_vectors(adapter->dev);
}

static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
	if (adapter->workqueue)
		destroy_workqueue(adapter->workqueue);

	if (adapter->mmioBase)
		pci_iounmap(adapter->dev, adapter->mmioBase);

	pci_release_regions(adapter->dev);

	if (adapter->cmd_map) {
		pvscsi_free_sgls(adapter);
		kfree(adapter->cmd_map);
	}

	if (adapter->rings_state)
		dma_free_coherent(&adapter->dev->dev, PAGE_SIZE,
				  adapter->rings_state, adapter->ringStatePA);

	if (adapter->req_ring)
		dma_free_coherent(&adapter->dev->dev,
				  adapter->req_pages * PAGE_SIZE,
				  adapter->req_ring, adapter->reqRingPA);

	if (adapter->cmp_ring)
		dma_free_coherent(&adapter->dev->dev,
				  adapter->cmp_pages * PAGE_SIZE,
				  adapter->cmp_ring, adapter->cmpRingPA);

	if (adapter->msg_ring)
		dma_free_coherent(&adapter->dev->dev,
				  adapter->msg_pages * PAGE_SIZE,
				  adapter->msg_ring, adapter->msgRingPA);
}

/*
 * Allocate scatter gather lists.
 *
 * These are statically allocated. Trying to be clever was not worth it.
 *
 * Dynamic allocation can fail, and we can't go deep into the memory
 * allocator, since we're a SCSI driver, and trying too hard to allocate
 * memory might generate disk I/O. We also don't want to fail disk I/O
 * in that case because we can't get an allocation - the I/O could be
 * trying to swap out data to free memory. Since that is pathological,
 * just use a statically allocated scatter list.
 */
static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
	struct pvscsi_ctx *ctx;
	int i;

	ctx = adapter->cmd_map;
	BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);

	for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
		ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
						    get_order(SGL_SIZE));
		ctx->sglPA = 0;
		BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
		if (!ctx->sgl) {
			for (; i >= 0; --i, --ctx) {
				free_pages((unsigned long)ctx->sgl,
					   get_order(SGL_SIZE));
				ctx->sgl = NULL;
			}
			return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Query the device, fetch the config info and return the
 * maximum number of targets on the adapter. On failure for
 * any reason, return the default of 16.
 */
static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescConfigCmd cmd;
	struct PVSCSIConfigPageHeader *header;
	struct device *dev;
	dma_addr_t configPagePA;
	void *config_page;
	u32 numPhys = 16;

	dev = pvscsi_dev(adapter);
	config_page = dma_alloc_coherent(&adapter->dev->dev, PAGE_SIZE,
			&configPagePA, GFP_KERNEL);
	if (!config_page) {
		dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n");
		goto exit;
	}
	BUG_ON(configPagePA & ~PAGE_MASK);

	/* Fetch config info from the device. */
	cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32;
	cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER;
	cmd.cmpAddr = configPagePA;
	cmd._pad = 0;

	/*
	 * Mark the completion page header with error values. If the device
	 * completes the command successfully, it sets the status values to
	 * indicate success.
	 */
	header = config_page;
	memset(header, 0, sizeof *header);
	header->hostStatus = BTSTAT_INVPARAM;
	header->scsiStatus = SDSTAT_CHECK;

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd);

	if (header->hostStatus == BTSTAT_SUCCESS &&
	    header->scsiStatus == SDSTAT_GOOD) {
		struct PVSCSIConfigPageController *config;

		config = config_page;
		numPhys = config->numPhys;
	} else
		dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n",
			 header->hostStatus, header->scsiStatus);
	dma_free_coherent(&adapter->dev->dev, PAGE_SIZE, config_page,
			  configPagePA);
exit:
	return numPhys;
}

static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	unsigned int irq_flag = PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY;
	struct pvscsi_adapter *adapter;
	struct pvscsi_adapter adapter_temp;
	struct Scsi_Host *host = NULL;
	unsigned int i;
	int error;
	u32 max_id;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	/*
	 * Use a temporary pvscsi_adapter struct until we have found the
	 * number of targets on the adapter; after that, switch to the
	 * real allocated struct.
	 */
	adapter = &adapter_temp;
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = pdev;
	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_disable_device;
	}

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources_and_disable;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources_and_disable;
	}

	pci_set_master(pdev);

	/*
	 * Ask the device for the maximum number of targets before deciding
	 * the default pvscsi_ring_pages value.
	 */
	max_id = pvscsi_get_max_targets(adapter);
	printk(KERN_INFO "vmw_pvscsi: max_id: %u\n", max_id);

	if (pvscsi_ring_pages == 0)
		/*
		 * Pick the default: 8 pages for up to 16 targets, the
		 * maximum above that.
		 */
		pvscsi_ring_pages = (max_id > 16) ?
			PVSCSI_SETUP_RINGS_MAX_NUM_PAGES :
			PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
	printk(KERN_INFO
	       "vmw_pvscsi: setting ring_pages to %d\n",
	       pvscsi_ring_pages);

	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_release_resources_and_disable;
	}

	/*
	 * From here on, use the real pvscsi_adapter struct.
	 */
	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev = pdev;
	adapter->host = host;
	/*
	 * Copy back what we already have to the allocated adapter struct.
	 */
	adapter->rev = adapter_temp.rev;
	adapter->mmioBase = adapter_temp.mmioBase;

	spin_lock_init(&adapter->hw_lock);
	host->max_channel = 0;
	host->max_lun = 1;
	host->max_cmd_len = 16;
	host->max_id = max_id;

	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	if (pvscsi_disable_msix)
		irq_flag &= ~PCI_IRQ_MSIX;
	if (pvscsi_disable_msi)
		irq_flag &= ~PCI_IRQ_MSI;

	error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
	if (error < 0)
		goto out_reset_adapter;

	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
	printk(KERN_DEBUG "vmw_pvscsi: driver-based request coalescing %sabled\n",
	       adapter->use_req_threshold ? "en" : "dis");

	if (adapter->dev->msix_enabled || adapter->dev->msi_enabled) {
		printk(KERN_INFO "vmw_pvscsi: using MSI%s\n",
		       adapter->dev->msix_enabled ? "-X" : "");
		error = request_irq(pci_irq_vector(pdev, 0), pvscsi_isr,
				    0, "vmw_pvscsi", adapter);
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		error = request_irq(pci_irq_vector(pdev, 0), pvscsi_shared_isr,
				    IRQF_SHARED, "vmw_pvscsi", adapter);
	}

	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_shutdown_intr(adapter);
	pvscsi_release_resources(adapter);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return error;

out_release_resources_and_disable:
	pvscsi_shutdown_intr(adapter);
	pvscsi_release_resources(adapter);
	goto out_disable_device;
}

static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
	pvscsi_mask_intr(adapter);

	if (adapter->workqueue)
		flush_workqueue(adapter->workqueue);

	pvscsi_shutdown_intr(adapter);

	pvscsi_process_request_ring(adapter);
	pvscsi_process_completion_ring(adapter);
	ll_adapter_reset(adapter);
}

static void pvscsi_shutdown(struct pci_dev *dev)
{
	struct Scsi_Host *host = pci_get_drvdata(dev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	__pvscsi_shutdown(adapter);
}

static void pvscsi_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct pvscsi_adapter *adapter = shost_priv(host);

	scsi_remove_host(host);

	__pvscsi_shutdown(adapter);
	pvscsi_release_resources(adapter);

	scsi_host_put(host);

	pci_disable_device(pdev);
}

static struct pci_driver pvscsi_pci_driver = {
	.name		= "vmw_pvscsi",
	.id_table	= pvscsi_pci_tbl,
	.probe		= pvscsi_probe,
	.remove		= pvscsi_remove,
	.shutdown	= pvscsi_shutdown,
};

static int __init pvscsi_init(void)
{
	pr_info("%s - version %s\n",
		PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
	return pci_register_driver(&pvscsi_pci_driver);
}

static void __exit pvscsi_exit(void)
{
	pci_unregister_driver(&pvscsi_pci_driver);
}

module_init(pvscsi_init);
module_exit(pvscsi_exit);