Commit | Line | Data |
---|---|---|
c8806b6c NM |
1 | /* |
2 | * Copyright 2014 Cisco Systems, Inc. All rights reserved. | |
3 | * | |
4 | * This program is free software; you may redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License as published by | |
6 | * the Free Software Foundation; version 2 of the License. | |
7 | * | |
8 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
9 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
10 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
11 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
12 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
13 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
14 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
15 | * SOFTWARE. | |
16 | */ | |
17 | ||
18 | #include <linux/module.h> | |
19 | #include <linux/mempool.h> | |
20 | #include <linux/string.h> | |
21 | #include <linux/slab.h> | |
22 | #include <linux/errno.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/pci.h> | |
25 | #include <linux/skbuff.h> | |
26 | #include <linux/interrupt.h> | |
27 | #include <linux/spinlock.h> | |
28 | #include <linux/workqueue.h> | |
29 | #include <scsi/scsi_host.h> | |
30 | #include <scsi/scsi_tcq.h> | |
31 | ||
32 | #include "snic.h" | |
33 | #include "snic_fwint.h" | |
34 | ||
35 | #define PCI_DEVICE_ID_CISCO_SNIC 0x0046 | |
36 | ||
37 | /* Supported devices by snic module */ | |
38 | static struct pci_device_id snic_id_table[] = { | |
39 | {PCI_DEVICE(0x1137, PCI_DEVICE_ID_CISCO_SNIC) }, | |
40 | { 0, } /* end of table */ | |
41 | }; | |
42 | ||
43 | unsigned int snic_log_level = 0x0; | |
44 | module_param(snic_log_level, int, S_IRUGO|S_IWUSR); | |
45 | MODULE_PARM_DESC(snic_log_level, "bitmask for snic logging levels"); | |
46 | ||
47 | #ifdef CONFIG_SCSI_SNIC_DEBUG_FS | |
48 | unsigned int snic_trace_max_pages = 16; | |
49 | module_param(snic_trace_max_pages, uint, S_IRUGO|S_IWUSR); | |
50 | MODULE_PARM_DESC(snic_trace_max_pages, | |
51 | "Total allocated memory pages for snic trace buffer"); | |
52 | ||
53 | #endif | |
54 | unsigned int snic_max_qdepth = SNIC_DFLT_QUEUE_DEPTH; | |
55 | module_param(snic_max_qdepth, uint, S_IRUGO | S_IWUSR); | |
56 | MODULE_PARM_DESC(snic_max_qdepth, "Queue depth to report for each LUN"); | |
57 | ||
58 | /* | |
59 | * snic_slave_alloc : callback function to SCSI Mid Layer, called on | |
60 | * scsi device initialization. | |
61 | */ | |
62 | static int | |
63 | snic_slave_alloc(struct scsi_device *sdev) | |
64 | { | |
65 | struct snic_tgt *tgt = starget_to_tgt(scsi_target(sdev)); | |
66 | ||
67 | if (!tgt || snic_tgt_chkready(tgt)) | |
68 | return -ENXIO; | |
69 | ||
70 | return 0; | |
71 | } | |
72 | ||
73 | /* | |
74 | * snic_slave_configure : callback function to SCSI Mid Layer, called on | |
75 | * scsi device initialization. | |
76 | */ | |
77 | static int | |
78 | snic_slave_configure(struct scsi_device *sdev) | |
79 | { | |
80 | struct snic *snic = shost_priv(sdev->host); | |
81 | u32 qdepth = 0, max_ios = 0; | |
82 | int tmo = SNIC_DFLT_CMD_TIMEOUT * HZ; | |
83 | ||
84 | /* Set Queue Depth */ | |
85 | max_ios = snic_max_qdepth; | |
86 | qdepth = min_t(u32, max_ios, SNIC_MAX_QUEUE_DEPTH); | |
87 | scsi_change_queue_depth(sdev, qdepth); | |
88 | ||
89 | if (snic->fwinfo.io_tmo > 1) | |
90 | tmo = snic->fwinfo.io_tmo * HZ; | |
91 | ||
92 | /* FW requires extended timeouts */ | |
93 | blk_queue_rq_timeout(sdev->request_queue, tmo); | |
94 | ||
95 | return 0; | |
96 | } | |
97 | ||
/*
 * snic_change_queue_depth : SCSI midlayer callback to change the queue
 * depth of a scsi device.  Clamps the requested depth, records ramp
 * up/down statistics, and returns the depth actually in effect.
 */
static int
snic_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	struct snic *snic = shost_priv(sdev->host);
	int qsz = 0;

	/*
	 * NOTE(review): qdepth is signed but min_t() compares as u32, so a
	 * negative request would wrap and clamp to SNIC_MAX_QUEUE_DEPTH
	 * instead of being rejected -- confirm callers never pass qdepth < 0.
	 */
	qsz = min_t(u32, qdepth, SNIC_MAX_QUEUE_DEPTH);

	/* Account whether the depth is ramping up or down vs the old value. */
	if (qsz < sdev->queue_depth)
		atomic64_inc(&snic->s_stats.misc.qsz_rampdown);
	else if (qsz > sdev->queue_depth)
		atomic64_inc(&snic->s_stats.misc.qsz_rampup);

	/* Remember the depth that was in effect before this change. */
	atomic64_set(&snic->s_stats.misc.last_qsz, sdev->queue_depth);

	scsi_change_queue_depth(sdev, qsz);

	return sdev->queue_depth;
}
116 | ||
/*
 * SCSI host template describing this driver's capabilities and entry
 * points to the SCSI midlayer (queueing, error handling, device setup).
 */
static struct scsi_host_template snic_host_template = {
	.module = THIS_MODULE,
	.name = SNIC_DRV_NAME,
	.queuecommand = snic_queuecommand,
	.eh_abort_handler = snic_abort_cmd,
	.eh_device_reset_handler = snic_device_reset,
	.eh_host_reset_handler = snic_host_reset,
	.slave_alloc = snic_slave_alloc,
	.slave_configure = snic_slave_configure,
	.change_queue_depth = snic_change_queue_depth,
	.this_id = -1,			/* no initiator ID on this transport */
	.cmd_per_lun = SNIC_DFLT_QUEUE_DEPTH,
	.can_queue = SNIC_MAX_IO_REQ,	/* may be lowered in snic_probe() */
	.sg_tablesize = SNIC_MAX_SG_DESC_CNT,
	.max_sectors = 0x800,		/* 2048 sectors = 1 MiB per request */
	.shost_attrs = snic_attrs,
	.track_queue_depth = 1,
	/* per-command driver scratch area allocated by the midlayer */
	.cmd_size = sizeof(struct snic_internal_io_state),
	.proc_name = "snic_scsi",
};
137 | ||
138 | /* | |
139 | * snic_handle_link_event : Handles link events such as link up/down/error | |
140 | */ | |
141 | void | |
142 | snic_handle_link_event(struct snic *snic) | |
143 | { | |
144 | unsigned long flags; | |
145 | ||
146 | spin_lock_irqsave(&snic->snic_lock, flags); | |
147 | if (snic->stop_link_events) { | |
148 | spin_unlock_irqrestore(&snic->snic_lock, flags); | |
149 | ||
150 | return; | |
151 | } | |
152 | spin_unlock_irqrestore(&snic->snic_lock, flags); | |
153 | ||
154 | queue_work(snic_glob->event_q, &snic->link_work); | |
155 | } /* end of snic_handle_link_event */ | |
156 | ||
157 | /* | |
158 | * snic_notify_set : sets notification area | |
159 | * This notification area is to receive events from fw | |
160 | * Note: snic supports only MSIX interrupts, in which we can just call | |
161 | * svnic_dev_notify_set directly | |
162 | */ | |
163 | static int | |
164 | snic_notify_set(struct snic *snic) | |
165 | { | |
166 | int ret = 0; | |
167 | enum vnic_dev_intr_mode intr_mode; | |
168 | ||
169 | intr_mode = svnic_dev_get_intr_mode(snic->vdev); | |
170 | ||
171 | if (intr_mode == VNIC_DEV_INTR_MODE_MSIX) { | |
172 | ret = svnic_dev_notify_set(snic->vdev, SNIC_MSIX_ERR_NOTIFY); | |
173 | } else { | |
174 | SNIC_HOST_ERR(snic->shost, | |
175 | "Interrupt mode should be setup before devcmd notify set %d\n", | |
176 | intr_mode); | |
177 | ret = -1; | |
178 | } | |
179 | ||
180 | return ret; | |
181 | } /* end of snic_notify_set */ | |
182 | ||
/*
 * snic_dev_wait : polls vnic open status.
 *
 * Kicks off @start and then polls @finished roughly every 100 ms until
 * it reports completion, an error occurs, or both the ~2 s deadline has
 * passed and the minimum retry count has been reached.
 */
static int
snic_dev_wait(struct vnic_dev *vdev,
	      int (*start)(struct vnic_dev *, int),
	      int (*finished)(struct vnic_dev *, int *),
	      int arg)
{
	unsigned long time;
	int ret, done;
	int retry_cnt = 0;

	ret = start(vdev, arg);
	if (ret)
		return ret;

	/*
	 * Wait for func to complete...2 seconds max.
	 *
	 * Sometimes schedule_timeout_uninterruptible take long time
	 * to wakeup, which results skipping retry. The retry counter
	 * ensures to retry at least two times.
	 *
	 * NOTE(review): the loop condition below actually guarantees at
	 * least three poll iterations (retry_cnt < 3) even after the
	 * deadline -- confirm whether the comment or the bound is intended.
	 */
	time = jiffies + (HZ * 2);
	do {
		ret = finished(vdev, &done);
		if (ret)
			return ret;

		if (done)
			return 0;
		schedule_timeout_uninterruptible(HZ/10);
		++retry_cnt;
	} while (time_after(time, jiffies) || (retry_cnt < 3));

	return -ETIMEDOUT;
} /* end of snic_dev_wait */
221 | ||
/*
 * snic_cleanup: called by snic_remove
 * Stops the snic device, masks all interrupts, Completed CQ entries are
 * drained. Posted WQ/RQ/Copy-WQ entries are cleanup
 *
 * The teardown order matters: the device is disabled and interrupts
 * masked first so no new completions arrive while queues are drained.
 */
static int
snic_cleanup(struct snic *snic)
{
	unsigned int i;
	int ret;

	svnic_dev_disable(snic->vdev);
	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	/*
	 * NOTE(review): a WQ disable failure aborts the whole cleanup here,
	 * leaving pools and pending requests unfreed -- confirm this
	 * early-return is the intended policy for snic_remove().
	 */
	for (i = 0; i < snic->wq_count; i++) {
		ret = svnic_wq_disable(&snic->wq[i]);
		if (ret)
			return ret;
	}

	/* Clean up completed IOs */
	snic_fwcq_cmpl_handler(snic, -1);

	snic_wq_cmpl_handler(snic, -1);

	/* Clean up the IOs that have not completed */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);

	for (i = 0; i < snic->cq_count; i++)
		svnic_cq_clean(&snic->cq[i]);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_clean(&snic->intr[i]);

	/* Cleanup snic specific requests */
	snic_free_all_untagged_reqs(snic);

	/* Cleanup Pending SCSI commands */
	snic_shutdown_scsi_cleanup(snic);

	/* Free the per-instance request mempools created in snic_probe(). */
	for (i = 0; i < SNIC_REQ_MAX_CACHES; i++)
		mempool_destroy(snic->req_pool[i]);

	return 0;
} /* end of snic_cleanup */
269 | ||
270 | ||
271 | static void | |
272 | snic_iounmap(struct snic *snic) | |
273 | { | |
274 | if (snic->bar0.vaddr) | |
275 | iounmap(snic->bar0.vaddr); | |
276 | } | |
277 | ||
278 | /* | |
279 | * snic_vdev_open_done : polls for svnic_dev_open cmd completion. | |
280 | */ | |
281 | static int | |
282 | snic_vdev_open_done(struct vnic_dev *vdev, int *done) | |
283 | { | |
284 | struct snic *snic = svnic_dev_priv(vdev); | |
285 | int ret; | |
286 | int nretries = 5; | |
287 | ||
288 | do { | |
289 | ret = svnic_dev_open_done(vdev, done); | |
290 | if (ret == 0) | |
291 | break; | |
292 | ||
293 | SNIC_HOST_INFO(snic->shost, "VNIC_DEV_OPEN Timedout.\n"); | |
294 | } while (nretries--); | |
295 | ||
296 | return ret; | |
297 | } /* end of snic_vdev_open_done */ | |
298 | ||
299 | /* | |
300 | * snic_add_host : registers scsi host with ML | |
301 | */ | |
302 | static int | |
303 | snic_add_host(struct Scsi_Host *shost, struct pci_dev *pdev) | |
304 | { | |
305 | int ret = 0; | |
306 | ||
307 | ret = scsi_add_host(shost, &pdev->dev); | |
308 | if (ret) { | |
309 | SNIC_HOST_ERR(shost, | |
310 | "snic: scsi_add_host failed. %d\n", | |
311 | ret); | |
312 | ||
313 | return ret; | |
314 | } | |
315 | ||
316 | SNIC_BUG_ON(shost->work_q != NULL); | |
317 | snprintf(shost->work_q_name, sizeof(shost->work_q_name), "scsi_wq_%d", | |
318 | shost->host_no); | |
319 | shost->work_q = create_singlethread_workqueue(shost->work_q_name); | |
320 | if (!shost->work_q) { | |
321 | SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n"); | |
322 | ||
323 | ret = -ENOMEM; | |
324 | } | |
325 | ||
326 | return ret; | |
327 | } /* end of snic_add_host */ | |
328 | ||
/*
 * Undo snic_add_host(): destroy the host work queue and unregister the
 * scsi host.  A NULL work_q means snic_add_host() never fully succeeded,
 * so there is nothing to tear down and the function is a no-op.
 */
static void
snic_del_host(struct Scsi_Host *shost)
{
	if (!shost->work_q)
		return;

	destroy_workqueue(shost->work_q);
	shost->work_q = NULL;
	scsi_remove_host(shost);
}
339 | ||
/* Return the current snic state (enum snic_state, stored atomically). */
int
snic_get_state(struct snic *snic)
{
	return atomic_read(&snic->state);
}
345 | ||
346 | void | |
347 | snic_set_state(struct snic *snic, enum snic_state state) | |
348 | { | |
349 | SNIC_HOST_INFO(snic->shost, "snic state change from %s to %s\n", | |
350 | snic_state_to_str(snic_get_state(snic)), | |
351 | snic_state_to_str(state)); | |
352 | ||
353 | atomic_set(&snic->state, state); | |
354 | } | |
355 | ||
/*
 * snic_probe : Initialize the snic interface.
 *
 * PCI probe callback.  Brings up one sNIC instance end to end: allocates
 * the Scsi_Host/snic pair, enables and maps PCI resources, discovers and
 * opens the vNIC device, allocates queues/interrupts/mempools, registers
 * the scsi host with the midlayer, and finally starts target discovery.
 * On any failure, teardown happens in strict reverse order through the
 * chained error labels at the bottom.
 */
static int
snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *shost;
	struct snic *snic;
	mempool_t *pool;
	unsigned long flags;
	u32 max_ios = 0;
	int ret, i;

	/* Device Information */
	SNIC_INFO("snic device %4x:%4x:%4x:%4x: ",
		  pdev->vendor, pdev->device, pdev->subsystem_vendor,
		  pdev->subsystem_device);

	SNIC_INFO("snic device bus %x: slot %x: fn %x\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	/*
	 * Allocate SCSI Host and setup association between host, and snic
	 */
	shost = scsi_host_alloc(&snic_host_template, sizeof(struct snic));
	if (!shost) {
		SNIC_ERR("Unable to alloc scsi_host\n");
		ret = -ENOMEM;

		goto prob_end;
	}
	snic = shost_priv(shost);
	snic->shost = shost;

	snprintf(snic->name, sizeof(snic->name) - 1, "%s%d", SNIC_DRV_NAME,
		 shost->host_no);

	SNIC_HOST_INFO(shost,
		       "snic%d = %p shost = %p device bus %x: slot %x: fn %x\n",
		       shost->host_no, snic, shost, pdev->bus->number,
		       PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Per snic debugfs init */
	snic_stats_debugfs_init(snic);
#endif

	/* Setup PCI Resources */
	pci_set_drvdata(pdev, snic);
	snic->pdev = pdev;

	ret = pci_enable_device(pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot enable PCI Resources, aborting : %d\n",
			      ret);

		goto err_free_snic;
	}

	ret = pci_request_regions(pdev, SNIC_DRV_NAME);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Cannot obtain PCI Resources, aborting : %d\n",
			      ret);

		goto err_pci_disable;
	}

	pci_set_master(pdev);

	/*
	 * Query PCI Controller on system for DMA addressing
	 * limitation for the device. Try 43-bit first, and
	 * fail to 32-bit.
	 */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(43));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (ret) {
			SNIC_HOST_ERR(shost,
				      "No Usable DMA Configuration, aborting %d\n",
				      ret);
			goto err_rel_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		SNIC_HOST_ERR(shost, "BAR0 not memory mappable aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	if (!snic->bar0.vaddr) {
		SNIC_HOST_ERR(shost,
			      "Cannot memory map BAR0 res hdr aborting.\n");

		ret = -ENODEV;
		goto err_rel_regions;
	}

	snic->bar0.bus_addr = pci_resource_start(pdev, 0);
	snic->bar0.len = pci_resource_len(pdev, 0);
	SNIC_BUG_ON(snic->bar0.bus_addr == 0);

	/* Devcmd2 Resource Allocation and Initialization */
	snic->vdev = svnic_dev_alloc_discover(NULL, snic, pdev, &snic->bar0, 1);
	if (!snic->vdev) {
		SNIC_HOST_ERR(shost, "vNIC Resource Discovery Failed.\n");

		ret = -ENODEV;
		goto err_iounmap;
	}

	ret = svnic_dev_cmd_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_INFO(shost, "Devcmd2 Init Failed. err = %d\n", ret);

		goto err_vnic_unreg;
	}

	ret = snic_dev_wait(snic->vdev, svnic_dev_open, snic_vdev_open_done, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev open failed, aborting. %d\n",
			      ret);

		goto err_vnic_unreg;
	}

	ret = svnic_dev_init(snic->vdev, 0);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev init failed. aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Get vNIC information */
	ret = snic_get_vnic_config(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Get vNIC configuration failed, aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs */
	max_ios = snic->config.io_throttle_count;
	if (max_ios != SNIC_UCSM_DFLT_THROTTLE_CNT_BLD)
		shost->can_queue = min_t(u32, SNIC_MAX_IO_REQ,
					 max_t(u32, SNIC_MIN_IO_REQ, max_ios));

	snic->max_tag_id = shost->can_queue;

	shost->max_lun = snic->config.luns_per_tgt;
	shost->max_id = SNIC_MAX_TARGET;

	shost->max_cmd_len = MAX_COMMAND_SIZE; /*defined in scsi_cmnd.h*/

	snic_get_res_counts(snic);

	/*
	 * Assumption: Only MSIx is supported
	 */
	ret = snic_set_intr_mode(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to set intr mode aborting. %d\n",
			      ret);

		goto err_dev_close;
	}

	ret = snic_alloc_vnic_res(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc vNIC resources aborting. %d\n",
			      ret);

		goto err_clear_intr;
	}

	/* Initialize specific lists */
	INIT_LIST_HEAD(&snic->list);

	/*
	 * spl_cmd_list for maintaining snic specific cmds
	 * such as EXCH_VER_REQ, REPORT_TARGETS etc
	 */
	INIT_LIST_HEAD(&snic->spl_cmd_list);
	spin_lock_init(&snic->spl_cmd_lock);

	/* initialize all snic locks */
	spin_lock_init(&snic->snic_lock);

	for (i = 0; i < SNIC_WQ_MAX; i++)
		spin_lock_init(&snic->wq_lock[i]);

	for (i = 0; i < SNIC_IO_LOCKS; i++)
		spin_lock_init(&snic->io_req_lock[i]);

	/* Per-instance request mempools backed by the global slab caches. */
	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_res;
	}

	snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");

		ret = -ENOMEM;
		goto err_free_dflt_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_CACHE_MAX_SGL] = pool;

	pool = mempool_create_slab_pool(2,
				snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	if (!pool) {
		SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");

		ret = -ENOMEM;
		goto err_free_max_sgl_pool;
	}

	snic->req_pool[SNIC_REQ_TM_CACHE] = pool;

	/* Initialize snic state */
	atomic_set(&snic->state, SNIC_INIT);

	atomic_set(&snic->ios_inflight, 0);

	/* Setup notification buffer area */
	ret = snic_notify_set(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to alloc notify buffer aborting. %d\n",
			      ret);

		goto err_free_tmreq_pool;
	}

	/* Publish this instance on the driver-wide list. */
	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_add_tail(&snic->list, &snic_glob->snic_list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_disc_init(&snic->disc);
	INIT_WORK(&snic->tgt_work, snic_handle_tgt_disc);
	INIT_WORK(&snic->disc_work, snic_handle_disc);
	INIT_WORK(&snic->link_work, snic_handle_link);

	/* Enable all queues */
	for (i = 0; i < snic->wq_count; i++)
		svnic_wq_enable(&snic->wq[i]);

	ret = svnic_dev_enable_wait(snic->vdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "vNIC dev enable failed w/ error %d\n",
			      ret);

		goto err_vdev_enable;
	}

	ret = snic_request_intr(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "Unable to request irq. %d\n", ret);

		goto err_req_intr;
	}

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_unmask(&snic->intr[i]);

	/* Get snic params */
	ret = snic_get_conf(snic);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Failed to get snic io config from FW w err %d\n",
			      ret);

		goto err_get_conf;
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add shost to SCSI
	 */
	ret = snic_add_host(shost, pdev);
	if (ret) {
		SNIC_HOST_ERR(shost,
			      "Adding scsi host Failed ... exiting. %d\n",
			      ret);

		goto err_get_conf;
	}

	snic_set_state(snic, SNIC_ONLINE);

	ret = snic_disc_start(snic);
	if (ret) {
		SNIC_HOST_ERR(shost, "snic_probe:Discovery Failed w err = %d\n",
			      ret);

		goto err_get_conf;
	}

	SNIC_HOST_INFO(shost, "SNIC Device Probe Successful.\n");

	return 0;

	/* Error labels below unwind in reverse order of acquisition. */
err_get_conf:
	snic_free_all_untagged_reqs(snic);

	for (i = 0; i < snic->intr_count; i++)
		svnic_intr_mask(&snic->intr[i]);

	snic_free_intr(snic);

err_req_intr:
	svnic_dev_disable(snic->vdev);

err_vdev_enable:
	svnic_dev_notify_unset(snic->vdev);

	for (i = 0; i < snic->wq_count; i++) {
		int rc = 0;

		rc = svnic_wq_disable(&snic->wq[i]);
		if (rc) {
			SNIC_HOST_ERR(shost,
				      "WQ Disable Failed w/ err = %d\n", rc);

			break;
		}
	}
	snic_del_host(snic->shost);

err_free_tmreq_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_TM_CACHE]);

err_free_max_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_MAX_SGL]);

err_free_dflt_sgl_pool:
	mempool_destroy(snic->req_pool[SNIC_REQ_CACHE_DFLT_SGL]);

err_free_res:
	snic_free_vnic_res(snic);

err_clear_intr:
	snic_clear_intr_mode(snic);

err_dev_close:
	svnic_dev_close(snic->vdev);

err_vnic_unreg:
	svnic_dev_unregister(snic->vdev);

err_iounmap:
	snic_iounmap(snic);

err_rel_regions:
	pci_release_regions(pdev);

err_pci_disable:
	pci_disable_device(pdev);

err_free_snic:
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	scsi_host_put(shost);
	pci_set_drvdata(pdev, NULL);

prob_end:
	SNIC_INFO("sNIC device : bus %d: slot %d: fn %d Registration Failed.\n",
		  pdev->bus->number, PCI_SLOT(pdev->devfn),
		  PCI_FUNC(pdev->devfn));

	return ret;
} /* end of snic_probe */
751 | ||
752 | ||
/*
 * snic_remove : invoked on unbinding the interface to cleanup the
 * resources allocated in snic_probe on initialization.
 *
 * Mirrors snic_probe() in reverse: stop event sources first, then drain
 * and free hardware resources, then unregister from the SCSI midlayer.
 */
static void
snic_remove(struct pci_dev *pdev)
{
	struct snic *snic = pci_get_drvdata(pdev);
	unsigned long flags;

	if (!snic) {
		SNIC_INFO("sNIC dev: bus %d slot %d fn %d snic inst is null.\n",
			  pdev->bus->number, PCI_SLOT(pdev->devfn),
			  PCI_FUNC(pdev->devfn));

		return;
	}

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events. ISR and other threads
	 * that can queue work items will also stop creating work
	 * items on the snic workqueue
	 */
	snic_set_state(snic, SNIC_OFFLINE);
	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->stop_link_events = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/* Wait for already-queued events, then tear down discovery. */
	flush_workqueue(snic_glob->event_q);
	snic_disc_term(snic);

	spin_lock_irqsave(&snic->snic_lock, flags);
	snic->in_remove = 1;
	spin_unlock_irqrestore(&snic->snic_lock, flags);

	/*
	 * This stops the snic device, masks all interrupts, Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleanup
	 */
	snic_cleanup(snic);

	spin_lock_irqsave(&snic_glob->snic_list_lock, flags);
	list_del(&snic->list);
	spin_unlock_irqrestore(&snic_glob->snic_list_lock, flags);

	snic_tgt_del_all(snic);
#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	snic_stats_debugfs_remove(snic);
#endif
	snic_del_host(snic->shost);

	svnic_dev_notify_unset(snic->vdev);
	snic_free_intr(snic);
	snic_free_vnic_res(snic);
	snic_clear_intr_mode(snic);
	svnic_dev_close(snic->vdev);
	svnic_dev_unregister(snic->vdev);
	snic_iounmap(snic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	/* this frees Scsi_Host and snic memory (continuous chunk) */
	scsi_host_put(snic->shost);
} /* end of snic_remove */
820 | ||
821 | ||
/* Driver-wide global context, shared by all snic instances. */
struct snic_global *snic_glob;
823 | ||
824 | /* | |
825 | * snic_global_data_init: Initialize SNIC Global Data | |
826 | * Notes: All the global lists, variables should be part of global data | |
827 | * this helps in debugging. | |
828 | */ | |
829 | static int | |
830 | snic_global_data_init(void) | |
831 | { | |
832 | int ret = 0; | |
833 | struct kmem_cache *cachep; | |
834 | ssize_t len = 0; | |
835 | ||
836 | snic_glob = kzalloc(sizeof(*snic_glob), GFP_KERNEL); | |
837 | ||
838 | if (!snic_glob) { | |
839 | SNIC_ERR("Failed to allocate Global Context.\n"); | |
840 | ||
841 | ret = -ENOMEM; | |
842 | goto gdi_end; | |
843 | } | |
844 | ||
845 | #ifdef CONFIG_SCSI_SNIC_DEBUG_FS | |
846 | /* Debugfs related Initialization */ | |
847 | /* Create debugfs entries for snic */ | |
fd84ec20 | 848 | snic_debugfs_init(); |
c8806b6c NM |
849 | |
850 | /* Trace related Initialization */ | |
851 | /* Allocate memory for trace buffer */ | |
852 | ret = snic_trc_init(); | |
853 | if (ret < 0) { | |
854 | SNIC_ERR("Trace buffer init failed, SNIC tracing disabled\n"); | |
855 | snic_trc_free(); | |
856 | /* continue even if it fails */ | |
857 | } | |
858 | ||
859 | #endif | |
860 | INIT_LIST_HEAD(&snic_glob->snic_list); | |
861 | spin_lock_init(&snic_glob->snic_list_lock); | |
862 | ||
863 | /* Create a cache for allocation of snic_host_req+default size ESGLs */ | |
864 | len = sizeof(struct snic_req_info); | |
865 | len += sizeof(struct snic_host_req) + sizeof(struct snic_dflt_sgl); | |
866 | cachep = kmem_cache_create("snic_req_dfltsgl", len, SNIC_SG_DESC_ALIGN, | |
867 | SLAB_HWCACHE_ALIGN, NULL); | |
868 | if (!cachep) { | |
869 | SNIC_ERR("Failed to create snic default sgl slab\n"); | |
870 | ret = -ENOMEM; | |
871 | ||
872 | goto err_dflt_req_slab; | |
873 | } | |
874 | snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL] = cachep; | |
875 | ||
876 | /* Create a cache for allocation of max size Extended SGLs */ | |
877 | len = sizeof(struct snic_req_info); | |
878 | len += sizeof(struct snic_host_req) + sizeof(struct snic_max_sgl); | |
879 | cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, | |
880 | SLAB_HWCACHE_ALIGN, NULL); | |
881 | if (!cachep) { | |
882 | SNIC_ERR("Failed to create snic max sgl slab\n"); | |
883 | ret = -ENOMEM; | |
884 | ||
885 | goto err_max_req_slab; | |
886 | } | |
887 | snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL] = cachep; | |
888 | ||
889 | len = sizeof(struct snic_host_req); | |
890 | cachep = kmem_cache_create("snic_req_maxsgl", len, SNIC_SG_DESC_ALIGN, | |
891 | SLAB_HWCACHE_ALIGN, NULL); | |
892 | if (!cachep) { | |
893 | SNIC_ERR("Failed to create snic tm req slab\n"); | |
894 | ret = -ENOMEM; | |
895 | ||
896 | goto err_tmreq_slab; | |
897 | } | |
898 | snic_glob->req_cache[SNIC_REQ_TM_CACHE] = cachep; | |
899 | ||
900 | /* snic_event queue */ | |
901 | snic_glob->event_q = create_singlethread_workqueue("snic_event_wq"); | |
902 | if (!snic_glob->event_q) { | |
903 | SNIC_ERR("snic event queue create failed\n"); | |
904 | ret = -ENOMEM; | |
905 | ||
906 | goto err_eventq; | |
907 | } | |
908 | ||
909 | return ret; | |
910 | ||
911 | err_eventq: | |
912 | kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]); | |
913 | ||
914 | err_tmreq_slab: | |
915 | kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]); | |
916 | ||
917 | err_max_req_slab: | |
918 | kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]); | |
919 | ||
920 | err_dflt_req_slab: | |
921 | #ifdef CONFIG_SCSI_SNIC_DEBUG_FS | |
922 | snic_trc_free(); | |
923 | snic_debugfs_term(); | |
924 | #endif | |
925 | kfree(snic_glob); | |
926 | snic_glob = NULL; | |
927 | ||
928 | gdi_end: | |
929 | return ret; | |
930 | } /* end of snic_glob_init */ | |
931 | ||
/*
 * snic_global_data_cleanup : Frees SNIC Global Data
 * Reverse of snic_global_data_init(); callers must guarantee no snic
 * instances remain (pci_unregister_driver has already run).
 */
static void
snic_global_data_cleanup(void)
{
	SNIC_BUG_ON(snic_glob == NULL);

	destroy_workqueue(snic_glob->event_q);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_TM_CACHE]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_MAX_SGL]);
	kmem_cache_destroy(snic_glob->req_cache[SNIC_REQ_CACHE_DFLT_SGL]);

#ifdef CONFIG_SCSI_SNIC_DEBUG_FS
	/* Freeing Trace Resources */
	snic_trc_free();

	/* Freeing Debugfs Resources */
	snic_debugfs_term();
#endif
	kfree(snic_glob);
	snic_glob = NULL;
} /* end of snic_glob_cleanup */
955 | ||
/* PCI driver registration: binds snic_probe/snic_remove to the ID table. */
static struct pci_driver snic_driver = {
	.name = SNIC_DRV_NAME,
	.id_table = snic_id_table,
	.probe = snic_probe,
	.remove = snic_remove,
};
962 | ||
963 | static int __init | |
964 | snic_init_module(void) | |
965 | { | |
966 | int ret = 0; | |
967 | ||
968 | #ifndef __x86_64__ | |
969 | SNIC_INFO("SNIC Driver is supported only for x86_64 platforms!\n"); | |
970 | add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); | |
971 | #endif | |
972 | ||
973 | SNIC_INFO("%s, ver %s\n", SNIC_DRV_DESCRIPTION, SNIC_DRV_VERSION); | |
974 | ||
975 | ret = snic_global_data_init(); | |
976 | if (ret) { | |
977 | SNIC_ERR("Failed to Initialize Global Data.\n"); | |
978 | ||
979 | return ret; | |
980 | } | |
981 | ||
982 | ret = pci_register_driver(&snic_driver); | |
983 | if (ret < 0) { | |
984 | SNIC_ERR("PCI driver register error\n"); | |
985 | ||
986 | goto err_pci_reg; | |
987 | } | |
988 | ||
989 | return ret; | |
990 | ||
991 | err_pci_reg: | |
992 | snic_global_data_cleanup(); | |
993 | ||
994 | return ret; | |
995 | } | |
996 | ||
/* Module exit: unregister from PCI first so no probes race the cleanup. */
static void __exit
snic_cleanup_module(void)
{
	pci_unregister_driver(&snic_driver);
	snic_global_data_cleanup();
}
1003 | ||
module_init(snic_init_module);
module_exit(snic_cleanup_module);

/* Module metadata */
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(SNIC_DRV_DESCRIPTION);
MODULE_VERSION(SNIC_DRV_VERSION);
MODULE_DEVICE_TABLE(pci, snic_id_table);
MODULE_AUTHOR("Narsimhulu Musini <nmusini@cisco.com>, "
	      "Sesidhar Baddela <sebaddel@cisco.com>");