[SCSI] be2iscsi: Remove Ring mode from driver
[linux-2.6-block.git] drivers/scsi/be2iscsi/be_main.c
1 /**
2  * Copyright (C) 2005 - 2009 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/interrupt.h>
23 #include <linux/blkdev.h>
24 #include <linux/pci.h>
25 #include <linux/string.h>
26 #include <linux/kernel.h>
27 #include <linux/semaphore.h>
28
29 #include <scsi/libiscsi.h>
30 #include <scsi/scsi_transport_iscsi.h>
31 #include <scsi/scsi_transport.h>
32 #include <scsi/scsi_cmnd.h>
33 #include <scsi/scsi_device.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include "be_main.h"
37 #include "be_iscsi.h"
38 #include "be_mgmt.h"
39
40 static unsigned int be_iopoll_budget = 10;
41 static unsigned int be_max_phys_size = 64;
42 static unsigned int enable_msix = 1;
43
44 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
45 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
46 MODULE_AUTHOR("ServerEngines Corporation");
47 MODULE_LICENSE("GPL");
48 module_param(be_iopoll_budget, int, 0);
49 module_param(enable_msix, int, 0);
50 module_param(be_max_phys_size, uint, S_IRUGO);
51 MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically "
52                                    "contiguous memory that can be allocated. "
53                                    "Range is 16 - 128");
54
55 static int beiscsi_slave_configure(struct scsi_device *sdev)
56 {
57         blk_queue_max_segment_size(sdev->request_queue, 65536);
58         return 0;
59 }
60
61 /*------------------- PCI Driver operations and data ----------------- */
62 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
63         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
64         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
65         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
66         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
67         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
68         { 0 }
69 };
70 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
71
72 static struct scsi_host_template beiscsi_sht = {
73         .module = THIS_MODULE,
74         .name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
75         .proc_name = DRV_NAME,
76         .queuecommand = iscsi_queuecommand,
77         .eh_abort_handler = iscsi_eh_abort,
78         .change_queue_depth = iscsi_change_queue_depth,
79         .slave_configure = beiscsi_slave_configure,
80         .target_alloc = iscsi_target_alloc,
81         .eh_device_reset_handler = iscsi_eh_device_reset,
82         .eh_target_reset_handler = iscsi_eh_target_reset,
83         .sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
84         .can_queue = BE2_IO_DEPTH,
85         .this_id = -1,
86         .max_sectors = BEISCSI_MAX_SECTORS,
87         .cmd_per_lun = BEISCSI_CMD_PER_LUN,
88         .use_clustering = ENABLE_CLUSTERING,
89 };
90
91 static struct scsi_transport_template *beiscsi_scsi_transport;
92
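/**
 * beiscsi_hba_alloc - Allocate and set up the host adapter structure
 * @pcidev: PCI device for this adapter
 *
 * Allocates a Scsi_Host with the beiscsi_hba private area, fills in the
 * SCSI limits, takes a reference on the PCI device and registers the host.
 * Returns the hba on success or NULL on failure.
 */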
93 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
94 {
95         struct beiscsi_hba *phba;
96         struct Scsi_Host *shost;
97
98         shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
99         if (!shost) {
100                 dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
101                         "iscsi_host_alloc failed\n");
102                 return NULL;
103         }
104         shost->dma_boundary = pcidev->dma_mask;
105         shost->max_id = BE2_MAX_SESSIONS;
106         shost->max_channel = 0;
107         shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
108         shost->max_lun = BEISCSI_NUM_MAX_LUN;
109         shost->transportt = beiscsi_scsi_transport;
110         phba = iscsi_host_priv(shost);
111         memset(phba, 0, sizeof(*phba));
112         phba->shost = shost;
113         phba->pcidev = pci_dev_get(pcidev);
114         pci_set_drvdata(pcidev, phba);
115
116         if (iscsi_host_add(shost, &phba->pcidev->dev))
117                 goto free_devices;
118         return phba;
119
120 free_devices:
121         pci_dev_put(phba->pcidev);
122         iscsi_host_free(phba->shost);
123         return NULL;
124 }
125
126 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
127 {
128         if (phba->csr_va) {
129                 iounmap(phba->csr_va);
130                 phba->csr_va = NULL;
131         }
132         if (phba->db_va) {
133                 iounmap(phba->db_va);
134                 phba->db_va = NULL;
135         }
136         if (phba->pci_va) {
137                 iounmap(phba->pci_va);
138                 phba->pci_va = NULL;
139         }
140 }
141
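/**
 * beiscsi_map_pci_bars - ioremap the controller BARs
 * @phba: The hba being initialized
 * @pcidev: PCI device for this adapter
 *
 * Maps the CSR region (BAR 2), the doorbell region (first 128KB of BAR 4)
 * and the PCI config window (BAR 1). On failure the regions mapped so far
 * are unmapped and -ENOMEM is returned.
 */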
142 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
143                                 struct pci_dev *pcidev)
144 {
145         u8 __iomem *addr;
146
147         addr = ioremap_nocache(pci_resource_start(pcidev, 2),
148                                pci_resource_len(pcidev, 2));
149         if (addr == NULL)
150                 return -ENOMEM;
151         phba->ctrl.csr = addr;
152         phba->csr_va = addr;
153         phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
154
155         addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
156         if (addr == NULL)
157                 goto pci_map_err;
158         phba->ctrl.db = addr;
159         phba->db_va = addr;
160         phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
161
162         addr = ioremap_nocache(pci_resource_start(pcidev, 1),
163                                pci_resource_len(pcidev, 1));
164         if (addr == NULL)
165                 goto pci_map_err;
166         phba->ctrl.pcicfg = addr;
167         phba->pci_va = addr;
168         phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
169         return 0;
170
171 pci_map_err:
172         beiscsi_unmap_pci_function(phba);
173         return -ENOMEM;
174 }
175
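/**
 * beiscsi_enable_pci - Enable the adapter for PCI access
 * @pcidev: PCI device for this adapter
 *
 * Enables the device, sets bus mastering and programs a 64-bit consistent
 * DMA mask, falling back to 32-bit. The device is disabled again if no
 * DMA mask can be set.
 */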
176 static int beiscsi_enable_pci(struct pci_dev *pcidev)
177 {
178         int ret;
179
180         ret = pci_enable_device(pcidev);
181         if (ret) {
182                 dev_err(&pcidev->dev, "beiscsi_enable_pci - "
183                         "enable device failed\n");
184                 return ret;
185         }
186
187         pci_set_master(pcidev);
188         if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
189                 ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
190                 if (ret) {
191                         dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
192                         pci_disable_device(pcidev);
193                         return ret;
194                 }
195         }
196         return 0;
197 }
198
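/**
 * be_ctrl_init - Initialize the controller command interface
 * @phba: The hba being initialized
 * @pdev: PCI device for this adapter
 *
 * Maps the PCI BARs, allocates coherent memory for the MCC mailbox
 * (aligned to 16 bytes) and initializes the mailbox and MCC locks.
 */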
199 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
200 {
201         struct be_ctrl_info *ctrl = &phba->ctrl;
202         struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
203         struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
204         int status = 0;
205
206         ctrl->pdev = pdev;
207         status = beiscsi_map_pci_bars(phba, pdev);
208         if (status)
209                 return status;
210         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
211         mbox_mem_alloc->va = pci_alloc_consistent(pdev,
212                                                   mbox_mem_alloc->size,
213                                                   &mbox_mem_alloc->dma);
214         if (!mbox_mem_alloc->va) {
215                 beiscsi_unmap_pci_function(phba);
216                 status = -ENOMEM;
217                 return status;
218         }
219
220         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
221         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
222         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
223         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
224         spin_lock_init(&ctrl->mbox_lock);
225         spin_lock_init(&phba->ctrl.mcc_lock);
226         spin_lock_init(&phba->ctrl.mcc_cq_lock);
227
228         return status;
229 }
230
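/**
 * beiscsi_get_params - Derive driver limits from the firmware configuration
 * @phba: The hba being initialized
 *
 * Computes the per-controller IO, connection, ICD and async PDU counts as
 * well as the EQ/CQ sizes and WRBs per connection from the values reported
 * by the firmware.
 */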
231 static void beiscsi_get_params(struct beiscsi_hba *phba)
232 {
233         phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
234                                     - (phba->fw_config.iscsi_cid_count
235                                     + BE2_TMFS
236                                     + BE2_NOPOUT_REQ));
237         phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
238         phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
239         phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
240         phba->params.num_sge_per_io = BE2_SGE;
241         phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
242         phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
243         phba->params.eq_timer = 64;
244         phba->params.num_eq_entries =
245             (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
246                                     + BE2_TMFS) / 512) + 1) * 512;
247         phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
248                                 ? 1024 : phba->params.num_eq_entries;
249         SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
250                              phba->params.num_eq_entries);
251         phba->params.num_cq_entries =
252             (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
253                                     + BE2_TMFS) / 512) + 1) * 512;
254         phba->params.wrbs_per_cxn = 256;
255 }
256
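/**
 * hwi_ring_eq_db - Ring the event queue doorbell
 * @phba: The hba owning the queue
 * @id: EQ ring id
 * @clr_interrupt: Clear the interrupt bit
 * @num_processed: Number of EQ entries popped
 * @rearm: Re-arm the EQ
 * @event: Mark the pop as an event
 */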
257 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
258                            unsigned int id, unsigned int clr_interrupt,
259                            unsigned int num_processed,
260                            unsigned char rearm, unsigned char event)
261 {
262         u32 val = 0;
263         val |= id & DB_EQ_RING_ID_MASK;
264         if (rearm)
265                 val |= 1 << DB_EQ_REARM_SHIFT;
266         if (clr_interrupt)
267                 val |= 1 << DB_EQ_CLR_SHIFT;
268         if (event)
269                 val |= 1 << DB_EQ_EVNT_SHIFT;
270         val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
271         iowrite32(val, phba->db_va + DB_EQ_OFFSET);
272 }
273
274 /**
275  * be_isr_mcc - The isr routine for the MCC event queue (MSI-X mode).
276  * @irq: Not used
277  * @dev_id: Pointer to host adapter structure
278  */
279 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
280 {
281         struct beiscsi_hba *phba;
282         struct be_eq_entry *eqe = NULL;
283         struct be_queue_info *eq;
284         struct be_queue_info *mcc;
285         unsigned int num_eq_processed;
286         struct be_eq_obj *pbe_eq;
287         unsigned long flags;
288
289         pbe_eq = dev_id;
290         eq = &pbe_eq->q;
291         phba =  pbe_eq->phba;
292         mcc = &phba->ctrl.mcc_obj.cq;
293         eqe = queue_tail_node(eq);
294         if (!eqe)
295                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
296
297         num_eq_processed = 0;
298
299         while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
300                                 & EQE_VALID_MASK) {
301                 if (((eqe->dw[offsetof(struct amap_eq_entry,
302                      resource_id) / 32] &
303                      EQE_RESID_MASK) >> 16) == mcc->id) {
304                         spin_lock_irqsave(&phba->isr_lock, flags);
305                         phba->todo_mcc_cq = 1;
306                         spin_unlock_irqrestore(&phba->isr_lock, flags);
307                 }
308                 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
309                 queue_tail_inc(eq);
310                 eqe = queue_tail_node(eq);
311                 num_eq_processed++;
312         }
313         if (phba->todo_mcc_cq)
314                 queue_work(phba->wq, &phba->work_cqs);
315         if (num_eq_processed)
316                 hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
317
318         return IRQ_HANDLED;
319 }
320
321 /**
322  * be_isr_msix - The MSI-X isr routine for a per-CPU I/O event queue.
323  * @irq: Not used
324  * @dev_id: Pointer to host adapter structure
325  */
326 static irqreturn_t be_isr_msix(int irq, void *dev_id)
327 {
328         struct beiscsi_hba *phba;
329         struct be_eq_entry *eqe = NULL;
330         struct be_queue_info *eq;
331         struct be_queue_info *cq;
332         unsigned int num_eq_processed;
333         struct be_eq_obj *pbe_eq;
334         unsigned long flags;
335
336         pbe_eq = dev_id;
337         eq = &pbe_eq->q;
338         cq = pbe_eq->cq;
339         eqe = queue_tail_node(eq);
340         if (!eqe)
341                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
342
343         phba = pbe_eq->phba;
344         num_eq_processed = 0;
345         if (blk_iopoll_enabled) {
346                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
347                                         & EQE_VALID_MASK) {
348                         if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
349                                 blk_iopoll_sched(&pbe_eq->iopoll);
350
351                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
352                         queue_tail_inc(eq);
353                         eqe = queue_tail_node(eq);
354                         num_eq_processed++;
355                 }
356                 if (num_eq_processed)
357                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
358
359                 return IRQ_HANDLED;
360         } else {
361                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
362                                                 & EQE_VALID_MASK) {
363                         spin_lock_irqsave(&phba->isr_lock, flags);
364                         phba->todo_cq = 1;
365                         spin_unlock_irqrestore(&phba->isr_lock, flags);
366                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
367                         queue_tail_inc(eq);
368                         eqe = queue_tail_node(eq);
369                         num_eq_processed++;
370                 }
371                 if (phba->todo_cq)
372                         queue_work(phba->wq, &phba->work_cqs);
373
374                 if (num_eq_processed)
375                         hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
376
377                 return IRQ_HANDLED;
378         }
379 }
380
381 /**
382  * be_isr - The legacy (INTx) isr routine of the driver.
383  * @irq: Not used
384  * @dev_id: Pointer to host adapter structure
385  */
386 static irqreturn_t be_isr(int irq, void *dev_id)
387 {
388         struct beiscsi_hba *phba;
389         struct hwi_controller *phwi_ctrlr;
390         struct hwi_context_memory *phwi_context;
391         struct be_eq_entry *eqe = NULL;
392         struct be_queue_info *eq;
393         struct be_queue_info *cq;
394         struct be_queue_info *mcc;
395         unsigned long flags, index;
396         unsigned int num_mcceq_processed, num_ioeq_processed;
397         struct be_ctrl_info *ctrl;
398         struct be_eq_obj *pbe_eq;
399         int isr;
400
401         phba = dev_id;
402         ctrl = &phba->ctrl;
403         isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
404                        (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
405         if (!isr)
406                 return IRQ_NONE;
407
408         phwi_ctrlr = phba->phwi_ctrlr;
409         phwi_context = phwi_ctrlr->phwi_ctxt;
410         pbe_eq = &phwi_context->be_eq[0];
411
412         eq = &phwi_context->be_eq[0].q;
413         mcc = &phba->ctrl.mcc_obj.cq;
414         index = 0;
415         eqe = queue_tail_node(eq);
416         if (!eqe)
417                 SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
418
419         num_ioeq_processed = 0;
420         num_mcceq_processed = 0;
421         if (blk_iopoll_enabled) {
422                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
423                                         & EQE_VALID_MASK) {
424                         if (((eqe->dw[offsetof(struct amap_eq_entry,
425                              resource_id) / 32] &
426                              EQE_RESID_MASK) >> 16) == mcc->id) {
427                                 spin_lock_irqsave(&phba->isr_lock, flags);
428                                 phba->todo_mcc_cq = 1;
429                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
430                                 num_mcceq_processed++;
431                         } else {
432                                 if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
433                                         blk_iopoll_sched(&pbe_eq->iopoll);
434                                 num_ioeq_processed++;
435                         }
436                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
437                         queue_tail_inc(eq);
438                         eqe = queue_tail_node(eq);
439                 }
440                 if (num_ioeq_processed || num_mcceq_processed) {
441                         if (phba->todo_mcc_cq)
442                                 queue_work(phba->wq, &phba->work_cqs);
443
444                         if ((num_mcceq_processed) && (!num_ioeq_processed))
445                                 hwi_ring_eq_db(phba, eq->id, 0,
446                                               (num_ioeq_processed +
447                                                num_mcceq_processed) , 1, 1);
448                         else
449                                 hwi_ring_eq_db(phba, eq->id, 0,
450                                                (num_ioeq_processed +
451                                                 num_mcceq_processed), 0, 1);
452
453                         return IRQ_HANDLED;
454                 } else
455                         return IRQ_NONE;
456         } else {
457                 cq = &phwi_context->be_cq[0];
458                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
459                                                 & EQE_VALID_MASK) {
460
461                         if (((eqe->dw[offsetof(struct amap_eq_entry,
462                              resource_id) / 32] &
463                              EQE_RESID_MASK) >> 16) != cq->id) {
464                                 spin_lock_irqsave(&phba->isr_lock, flags);
465                                 phba->todo_mcc_cq = 1;
466                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
467                         } else {
468                                 spin_lock_irqsave(&phba->isr_lock, flags);
469                                 phba->todo_cq = 1;
470                                 spin_unlock_irqrestore(&phba->isr_lock, flags);
471                         }
472                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
473                         queue_tail_inc(eq);
474                         eqe = queue_tail_node(eq);
475                         num_ioeq_processed++;
476                 }
477                 if (phba->todo_cq || phba->todo_mcc_cq)
478                         queue_work(phba->wq, &phba->work_cqs);
479
480                 if (num_ioeq_processed) {
481                         hwi_ring_eq_db(phba, eq->id, 0,
482                                        num_ioeq_processed, 1, 1);
483                         return IRQ_HANDLED;
484                 } else
485                         return IRQ_NONE;
486         }
487 }
488
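/**
 * beiscsi_init_irqs - Register the interrupt handlers
 * @phba: The hba being initialized
 *
 * In MSI-X mode one vector is requested per CPU for the I/O event queues
 * plus one vector for the MCC event queue; otherwise a single shared INTx
 * interrupt is requested.
 */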
489 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
490 {
491         struct pci_dev *pcidev = phba->pcidev;
492         struct hwi_controller *phwi_ctrlr;
493         struct hwi_context_memory *phwi_context;
494         int ret, msix_vec, i = 0;
495         char desc[32];
496
497         phwi_ctrlr = phba->phwi_ctrlr;
498         phwi_context = phwi_ctrlr->phwi_ctxt;
499
500         if (phba->msix_enabled) {
501                 for (i = 0; i < phba->num_cpus; i++) {
502                         sprintf(desc, "beiscsi_msix_%04x", i);
503                         msix_vec = phba->msix_entries[i].vector;
504                         ret = request_irq(msix_vec, be_isr_msix, 0, desc,
505                                           &phwi_context->be_eq[i]);
506                 }
507                 msix_vec = phba->msix_entries[i].vector;
508                 ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
509                                   &phwi_context->be_eq[i]);
510         } else {
511                 ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
512                                   "beiscsi", phba);
513                 if (ret) {
514                         shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs - "
515                                      "Failed to register irq\n");
516                         return ret;
517                 }
518         }
519         return 0;
520 }
521
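/**
 * hwi_ring_cq_db - Ring the completion queue doorbell
 * @phba: The hba owning the queue
 * @id: CQ ring id
 * @num_processed: Number of CQ entries popped
 * @rearm: Re-arm the CQ
 * @event: Not used
 */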
522 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
523                            unsigned int id, unsigned int num_processed,
524                            unsigned char rearm, unsigned char event)
525 {
526         u32 val = 0;
527         val |= id & DB_CQ_RING_ID_MASK;
528         if (rearm)
529                 val |= 1 << DB_CQ_REARM_SHIFT;
530         val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
531         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
532 }
533
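/**
 * beiscsi_process_async_pdu - Complete an unsolicited PDU through libiscsi
 * @beiscsi_conn: Connection the PDU arrived on
 * @phba: The hba owning the connection
 * @cid: Connection id
 * @ppdu: PDU header
 * @pdu_len: Length of the PDU header
 * @pbuffer: PDU data, if any
 * @buf_len: Length of the PDU data
 *
 * Handles NOP-In, async event, reject and login/text responses and passes
 * them to __iscsi_complete_pdu() under the session lock. Returns 1 for an
 * unrecognized opcode, 0 otherwise.
 */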
534 static unsigned int
535 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
536                           struct beiscsi_hba *phba,
537                           unsigned short cid,
538                           struct pdu_base *ppdu,
539                           unsigned long pdu_len,
540                           void *pbuffer, unsigned long buf_len)
541 {
542         struct iscsi_conn *conn = beiscsi_conn->conn;
543         struct iscsi_session *session = conn->session;
544         struct iscsi_task *task;
545         struct beiscsi_io_task *io_task;
546         struct iscsi_hdr *login_hdr;
547
548         switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
549                                                 PDUBASE_OPCODE_MASK) {
550         case ISCSI_OP_NOOP_IN:
551                 pbuffer = NULL;
552                 buf_len = 0;
553                 break;
554         case ISCSI_OP_ASYNC_EVENT:
555                 break;
556         case ISCSI_OP_REJECT:
557                 WARN_ON(!pbuffer);
558                 WARN_ON(buf_len != 48);
559                 SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
560                 break;
561         case ISCSI_OP_LOGIN_RSP:
562         case ISCSI_OP_TEXT_RSP:
563                 task = conn->login_task;
564                 io_task = task->dd_data;
565                 login_hdr = (struct iscsi_hdr *)ppdu;
566                 login_hdr->itt = io_task->libiscsi_itt;
567                 break;
568         default:
569                 shost_printk(KERN_WARNING, phba->shost,
570                              "Unrecognized opcode 0x%x in async msg \n",
571                              (ppdu->
572                              dw[offsetof(struct amap_pdu_base, opcode) / 32]
573                                                 & PDUBASE_OPCODE_MASK));
574                 return 1;
575         }
576
577         spin_lock_bh(&session->lock);
578         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
579         spin_unlock_bh(&session->lock);
580         return 0;
581 }
582
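/**
 * alloc_io_sgl_handle - Allocate an SGL handle from the I/O pool
 * @phba: The hba owning the pool
 *
 * Pops the handle at the current allocation index and advances the index
 * circularly. Returns NULL when no handles are available.
 */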
583 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
584 {
585         struct sgl_handle *psgl_handle;
586
587         if (phba->io_sgl_hndl_avbl) {
588                 SE_DEBUG(DBG_LVL_8,
589                          "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
590                          phba->io_sgl_alloc_index);
591                 psgl_handle = phba->io_sgl_hndl_base[phba->
592                                                 io_sgl_alloc_index];
593                 phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
594                 phba->io_sgl_hndl_avbl--;
595                 if (phba->io_sgl_alloc_index == (phba->params.
596                                                  ios_per_ctrl - 1))
597                         phba->io_sgl_alloc_index = 0;
598                 else
599                         phba->io_sgl_alloc_index++;
600         } else
601                 psgl_handle = NULL;
602         return psgl_handle;
603 }
604
605 static void
606 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
607 {
608         SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
609                  phba->io_sgl_free_index);
610         if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
611                 /*
612                  * this can happen if clean_task is called on a task that
613                  * failed in xmit_task or alloc_pdu.
614                  */
615                 SE_DEBUG(DBG_LVL_8,
616                          "Double Free in IO SGL io_sgl_free_index=%d, "
617                          "value there=%p\n", phba->io_sgl_free_index,
618                          phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
619                 return;
620         }
621         phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
622         phba->io_sgl_hndl_avbl++;
623         if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
624                 phba->io_sgl_free_index = 0;
625         else
626                 phba->io_sgl_free_index++;
627 }
628
629 /**
630  * alloc_wrb_handle - To allocate a wrb handle
631  * @phba: The hba pointer
632  * @cid: The cid to use for allocation
633  *
634  * This happens under the session lock until submission to the chip.
635  */
636 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
637 {
638         struct hwi_wrb_context *pwrb_context;
639         struct hwi_controller *phwi_ctrlr;
640         struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
641
642         phwi_ctrlr = phba->phwi_ctrlr;
643         pwrb_context = &phwi_ctrlr->wrb_context[cid];
644         if (pwrb_context->wrb_handles_available >= 2) {
645                 pwrb_handle = pwrb_context->pwrb_handle_base[
646                                             pwrb_context->alloc_index];
647                 pwrb_context->wrb_handles_available--;
648                 if (pwrb_context->alloc_index ==
649                                                 (phba->params.wrbs_per_cxn - 1))
650                         pwrb_context->alloc_index = 0;
651                 else
652                         pwrb_context->alloc_index++;
653                 pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
654                                                 pwrb_context->alloc_index];
655                 pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
656         } else
657                 pwrb_handle = NULL;
658         return pwrb_handle;
659 }
660
661 /**
662  * free_wrb_handle - To free the wrb handle back to pool
663  * @phba: The hba pointer
664  * @pwrb_context: The context to free from
665  * @pwrb_handle: The wrb_handle to free
666  *
667  * This happens under the session lock until submission to the chip.
668  */
669 static void
670 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
671                 struct wrb_handle *pwrb_handle)
672 {
673         pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
674         pwrb_context->wrb_handles_available++;
675         if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
676                 pwrb_context->free_index = 0;
677         else
678                 pwrb_context->free_index++;
679
680         SE_DEBUG(DBG_LVL_8,
681                  "FREE WRB: pwrb_handle=%p free_index=0x%x "
682                  "wrb_handles_available=%d\n",
683                  pwrb_handle, pwrb_context->free_index,
684                  pwrb_context->wrb_handles_available);
685 }
686
687 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
688 {
689         struct sgl_handle *psgl_handle;
690
691         if (phba->eh_sgl_hndl_avbl) {
692                 psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
693                 phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
694                 SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
695                          phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
696                 phba->eh_sgl_hndl_avbl--;
697                 if (phba->eh_sgl_alloc_index ==
698                     (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
699                      1))
700                         phba->eh_sgl_alloc_index = 0;
701                 else
702                         phba->eh_sgl_alloc_index++;
703         } else
704                 psgl_handle = NULL;
705         return psgl_handle;
706 }
707
708 void
709 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
710 {
711
712         SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
713                              phba->eh_sgl_free_index);
714         if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
715                 /*
716                  * this can happen if clean_task is called on a task that
717                  * failed in xmit_task or alloc_pdu.
718                  */
719                 SE_DEBUG(DBG_LVL_8,
720                          "Double Free in eh SGL, eh_sgl_free_index=%d\n",
721                          phba->eh_sgl_free_index);
722                 return;
723         }
724         phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
725         phba->eh_sgl_hndl_avbl++;
726         if (phba->eh_sgl_free_index ==
727             (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
728                 phba->eh_sgl_free_index = 0;
729         else
730                 phba->eh_sgl_free_index++;
731 }
732
733 static void
734 be_complete_io(struct beiscsi_conn *beiscsi_conn,
735                struct iscsi_task *task, struct sol_cqe *psol)
736 {
737         struct beiscsi_io_task *io_task = task->dd_data;
738         struct be_status_bhs *sts_bhs =
739                                 (struct be_status_bhs *)io_task->cmd_bhs;
740         struct iscsi_conn *conn = beiscsi_conn->conn;
741         unsigned int sense_len;
742         unsigned char *sense;
743         u32 resid = 0, exp_cmdsn, max_cmdsn;
744         u8 rsp, status, flags;
745
746         exp_cmdsn = (psol->
747                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
748                         & SOL_EXP_CMD_SN_MASK);
749         max_cmdsn = ((psol->
750                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
751                         & SOL_EXP_CMD_SN_MASK) +
752                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
753                                 / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
754         rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
755                                                 & SOL_RESP_MASK) >> 16);
756         status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
757                                                 & SOL_STS_MASK) >> 8);
758         flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
759                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
760
761         task->sc->result = (DID_OK << 16) | status;
762         if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
763                 task->sc->result = DID_ERROR << 16;
764                 goto unmap;
765         }
766
767         /* bidi not initially supported */
768         if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
769                 resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
770                                 32] & SOL_RES_CNT_MASK);
771
772                 if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
773                         task->sc->result = DID_ERROR << 16;
774
775                 if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
776                         scsi_set_resid(task->sc, resid);
777                         if (!status && (scsi_bufflen(task->sc) - resid <
778                             task->sc->underflow))
779                                 task->sc->result = DID_ERROR << 16;
780                 }
781         }
782
783         if (status == SAM_STAT_CHECK_CONDITION) {
784                 unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
785                 sense = sts_bhs->sense_info + sizeof(unsigned short);
786                 sense_len = be16_to_cpu(*slen);
787                 memcpy(task->sc->sense_buffer, sense,
788                        min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
789         }
790
791         if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
792                 if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
793                                                         & SOL_RES_CNT_MASK)
794                          conn->rxdata_octets += (psol->
795                              dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
796                              & SOL_RES_CNT_MASK);
797         }
798 unmap:
799         scsi_dma_unmap(io_task->scsi_cmnd);
800         iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
801 }
802
803 static void
804 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
805                    struct iscsi_task *task, struct sol_cqe *psol)
806 {
807         struct iscsi_logout_rsp *hdr;
808         struct beiscsi_io_task *io_task = task->dd_data;
809         struct iscsi_conn *conn = beiscsi_conn->conn;
810
811         hdr = (struct iscsi_logout_rsp *)task->hdr;
812         hdr->opcode = ISCSI_OP_LOGOUT_RSP;
813         hdr->t2wait = 5;
814         hdr->t2retain = 0;
815         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
816                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
817         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
818                                         32] & SOL_RESP_MASK);
819         hdr->exp_cmdsn = cpu_to_be32(psol->
820                         dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
821                                         & SOL_EXP_CMD_SN_MASK);
822         hdr->max_cmdsn = be32_to_cpu((psol->
823                          dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
824                                         & SOL_EXP_CMD_SN_MASK) +
825                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
826                                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
827         hdr->dlength[0] = 0;
828         hdr->dlength[1] = 0;
829         hdr->dlength[2] = 0;
830         hdr->hlength = 0;
831         hdr->itt = io_task->libiscsi_itt;
832         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
833 }
834
835 static void
836 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
837                 struct iscsi_task *task, struct sol_cqe *psol)
838 {
839         struct iscsi_tm_rsp *hdr;
840         struct iscsi_conn *conn = beiscsi_conn->conn;
841         struct beiscsi_io_task *io_task = task->dd_data;
842
843         hdr = (struct iscsi_tm_rsp *)task->hdr;
844         hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
845         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
846                                         & SOL_FLAGS_MASK) >> 24) | 0x80;
847         hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
848                                         32] & SOL_RESP_MASK);
849         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
850                                     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
851         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
852                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
853                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
854                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
855         hdr->itt = io_task->libiscsi_itt;
856         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
857 }
858
859 static void
860 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
861                        struct beiscsi_hba *phba, struct sol_cqe *psol)
862 {
863         struct hwi_wrb_context *pwrb_context;
864         struct wrb_handle *pwrb_handle = NULL;
865         struct hwi_controller *phwi_ctrlr;
866         struct iscsi_task *task;
867         struct beiscsi_io_task *io_task;
868         struct iscsi_conn *conn = beiscsi_conn->conn;
869         struct iscsi_session *session = conn->session;
870
871         phwi_ctrlr = phba->phwi_ctrlr;
872         pwrb_context = &phwi_ctrlr->wrb_context[((psol->
873                                 dw[offsetof(struct amap_sol_cqe, cid) / 32] &
874                                 SOL_CID_MASK) >> 6) -
875                                 phba->fw_config.iscsi_cid_start];
876         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
877                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
878                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
879         task = pwrb_handle->pio_handle;
880
881         io_task = task->dd_data;
882         spin_lock(&phba->mgmt_sgl_lock);
883         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
884         spin_unlock(&phba->mgmt_sgl_lock);
885         spin_lock_bh(&session->lock);
886         free_wrb_handle(phba, pwrb_context, pwrb_handle);
887         spin_unlock_bh(&session->lock);
888 }
889
890 static void
891 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
892                        struct iscsi_task *task, struct sol_cqe *psol)
893 {
894         struct iscsi_nopin *hdr;
895         struct iscsi_conn *conn = beiscsi_conn->conn;
896         struct beiscsi_io_task *io_task = task->dd_data;
897
898         hdr = (struct iscsi_nopin *)task->hdr;
899         hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
900                         & SOL_FLAGS_MASK) >> 24) | 0x80;
901         hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
902                                      i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
903         hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
904                         i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
905                         ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
906                         / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
907         hdr->opcode = ISCSI_OP_NOOP_IN;
908         hdr->itt = io_task->libiscsi_itt;
909         __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
910 }
911
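/**
 * hwi_complete_cmd - Complete a solicited CQE
 * @beiscsi_conn: Connection the completion belongs to
 * @phba: The hba owning the connection
 * @psol: The solicited completion entry
 *
 * Looks up the WRB handle for the completion and, based on the WRB type,
 * completes the task as I/O, logout, TMF or NOP-In under the session lock.
 */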
912 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
913                              struct beiscsi_hba *phba, struct sol_cqe *psol)
914 {
915         struct hwi_wrb_context *pwrb_context;
916         struct wrb_handle *pwrb_handle;
917         struct iscsi_wrb *pwrb = NULL;
918         struct hwi_controller *phwi_ctrlr;
919         struct iscsi_task *task;
920         unsigned int type;
921         struct iscsi_conn *conn = beiscsi_conn->conn;
922         struct iscsi_session *session = conn->session;
923
924         phwi_ctrlr = phba->phwi_ctrlr;
925         pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
926                                 (struct amap_sol_cqe, cid) / 32]
927                                 & SOL_CID_MASK) >> 6) -
928                                 phba->fw_config.iscsi_cid_start];
929         pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
930                                 dw[offsetof(struct amap_sol_cqe, wrb_index) /
931                                 32] & SOL_WRB_INDEX_MASK) >> 16)];
932         task = pwrb_handle->pio_handle;
933         pwrb = pwrb_handle->pwrb;
934         type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
935                                  WRB_TYPE_MASK) >> 28;
936
937         spin_lock_bh(&session->lock);
938         switch (type) {
939         case HWH_TYPE_IO:
940         case HWH_TYPE_IO_RD:
941                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
942                     ISCSI_OP_NOOP_OUT) {
943                         be_complete_nopin_resp(beiscsi_conn, task, psol);
944                 } else
945                         be_complete_io(beiscsi_conn, task, psol);
946                 break;
947
948         case HWH_TYPE_LOGOUT:
949                 be_complete_logout(beiscsi_conn, task, psol);
950                 break;
951
952         case HWH_TYPE_LOGIN:
953                 SE_DEBUG(DBG_LVL_1,
954                          "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
955                          " - Solicited path\n");
956                 break;
957
958         case HWH_TYPE_TMF:
959                 be_complete_tmf(beiscsi_conn, task, psol);
960                 break;
961
962         case HWH_TYPE_NOP:
963                 be_complete_nopin_resp(beiscsi_conn, task, psol);
964                 break;
965
966         default:
967                 shost_printk(KERN_WARNING, phba->shost,
968                                 "In hwi_complete_cmd, unknown type = %d "
969                                 "wrb_index 0x%x CID 0x%x\n", type,
970                                 ((psol->dw[offsetof(struct amap_iscsi_wrb,
971                                 type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
972                                 ((psol->dw[offsetof(struct amap_sol_cqe,
973                                 cid) / 32] & SOL_CID_MASK) >> 6));
974                 break;
975         }
976
977         spin_unlock_bh(&session->lock);
978 }
979
980 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
981                                           *pasync_ctx, unsigned int is_header,
982                                           unsigned int host_write_ptr)
983 {
984         if (is_header)
985                 return &pasync_ctx->async_entry[host_write_ptr].
986                     header_busy_list;
987         else
988                 return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
989 }
990
991 static struct async_pdu_handle *
992 hwi_get_async_handle(struct beiscsi_hba *phba,
993                      struct beiscsi_conn *beiscsi_conn,
994                      struct hwi_async_pdu_context *pasync_ctx,
995                      struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
996 {
997         struct be_bus_address phys_addr;
998         struct list_head *pbusy_list;
999         struct async_pdu_handle *pasync_handle = NULL;
1000         int buffer_len = 0;
1001         unsigned char buffer_index = -1;
1002         unsigned char is_header = 0;
1003
1004         phys_addr.u.a32.address_lo =
1005             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1006             ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1007                                                 & PDUCQE_DPL_MASK) >> 16);
1008         phys_addr.u.a32.address_hi =
1009             pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1010
1011         phys_addr.u.a64.address =
1012                         *((unsigned long long *)(&phys_addr.u.a64.address));
1013
1014         switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1015                         & PDUCQE_CODE_MASK) {
1016         case UNSOL_HDR_NOTIFY:
1017                 is_header = 1;
1018
1019                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1020                         (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1021                         index) / 32] & PDUCQE_INDEX_MASK));
1022
1023                 buffer_len = (unsigned int)(phys_addr.u.a64.address -
1024                                 pasync_ctx->async_header.pa_base.u.a64.address);
1025
1026                 buffer_index = buffer_len /
1027                                 pasync_ctx->async_header.buffer_size;
1028
1029                 break;
1030         case UNSOL_DATA_NOTIFY:
1031                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1032                                         dw[offsetof(struct amap_i_t_dpdu_cqe,
1033                                         index) / 32] & PDUCQE_INDEX_MASK));
1034                 buffer_len = (unsigned long)(phys_addr.u.a64.address -
1035                                         pasync_ctx->async_data.pa_base.u.
1036                                         a64.address);
1037                 buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1038                 break;
1039         default:
1040                 pbusy_list = NULL;
1041                 shost_printk(KERN_WARNING, phba->shost,
1042                         "Unexpected code=%d \n",
1043                          pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1044                                         code) / 32] & PDUCQE_CODE_MASK);
1045                 return NULL;
1046         }
1047
1048         WARN_ON(buffer_index > pasync_ctx->async_data.num_entries);
1049         WARN_ON(list_empty(pbusy_list));
1050         list_for_each_entry(pasync_handle, pbusy_list, link) {
1051                 WARN_ON(pasync_handle->consumed);
1052                 if (pasync_handle->index == buffer_index)
1053                         break;
1054         }
1055
1056         WARN_ON(!pasync_handle);
1057
1058         pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1059                                              phba->fw_config.iscsi_cid_start;
1060         pasync_handle->is_header = is_header;
1061         pasync_handle->buffer_len = ((pdpdu_cqe->
1062                         dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1063                         & PDUCQE_DPL_MASK) >> 16);
1064
1065         *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1066                         index) / 32] & PDUCQE_INDEX_MASK);
1067         return pasync_handle;
1068 }
1069
1070 static unsigned int
1071 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1072                            unsigned int is_header, unsigned int cq_index)
1073 {
1074         struct list_head *pbusy_list;
1075         struct async_pdu_handle *pasync_handle;
1076         unsigned int num_entries, writables = 0;
1077         unsigned int *pep_read_ptr, *pwritables;
1078
1079
1080         if (is_header) {
1081                 pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1082                 pwritables = &pasync_ctx->async_header.writables;
1083                 num_entries = pasync_ctx->async_header.num_entries;
1084         } else {
1085                 pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1086                 pwritables = &pasync_ctx->async_data.writables;
1087                 num_entries = pasync_ctx->async_data.num_entries;
1088         }
1089
1090         while ((*pep_read_ptr) != cq_index) {
1091                 (*pep_read_ptr)++;
1092                 *pep_read_ptr = (*pep_read_ptr) % num_entries;
1093
1094                 pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1095                                                      *pep_read_ptr);
1096                 if (writables == 0)
1097                         WARN_ON(list_empty(pbusy_list));
1098
1099                 if (!list_empty(pbusy_list)) {
1100                         pasync_handle = list_entry(pbusy_list->next,
1101                                                    struct async_pdu_handle,
1102                                                    link);
1103                         WARN_ON(!pasync_handle);
1104                         pasync_handle->consumed = 1;
1105                 }
1106
1107                 writables++;
1108         }
1109
1110         if (!writables) {
1111                 SE_DEBUG(DBG_LVL_1,
1112                          "Duplicate notification received - index 0x%x!!\n",
1113                          cq_index);
1114                 WARN_ON(1);
1115         }
1116
1117         *pwritables = *pwritables + writables;
1118         return 0;
1119 }
1120
1121 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1122                                        unsigned int cri)
1123 {
1124         struct hwi_controller *phwi_ctrlr;
1125         struct hwi_async_pdu_context *pasync_ctx;
1126         struct async_pdu_handle *pasync_handle, *tmp_handle;
1127         struct list_head *plist;
1128         unsigned int i = 0;
1129
1130         phwi_ctrlr = phba->phwi_ctrlr;
1131         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1132
1133         plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1134
1135         list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1136                 list_del(&pasync_handle->link);
1137
1138                 if (i == 0) {
1139                         list_add_tail(&pasync_handle->link,
1140                                       &pasync_ctx->async_header.free_list);
1141                         pasync_ctx->async_header.free_entries++;
1142                         i++;
1143                 } else {
1144                         list_add_tail(&pasync_handle->link,
1145                                       &pasync_ctx->async_data.free_list);
1146                         pasync_ctx->async_data.free_entries++;
1147                         i++;
1148                 }
1149         }
1150
1151         INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1152         pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1153         pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1154         return 0;
1155 }
1156
1157 static struct phys_addr *
1158 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1159                      unsigned int is_header, unsigned int host_write_ptr)
1160 {
1161         struct phys_addr *pasync_sge = NULL;
1162
1163         if (is_header)
1164                 pasync_sge = pasync_ctx->async_header.ring_base;
1165         else
1166                 pasync_sge = pasync_ctx->async_data.ring_base;
1167
1168         return pasync_sge + host_write_ptr;
1169 }
1170
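/**
 * hwi_post_async_buffers - Replenish a default PDU ring
 * @phba: The hba owning the ring
 * @is_header: Nonzero for the header ring, zero for the data ring
 *
 * Moves free async PDU handles back onto the hardware ring in multiples of
 * eight, updates the ring bookkeeping and rings the RXULP doorbell.
 */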
1171 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1172                                    unsigned int is_header)
1173 {
1174         struct hwi_controller *phwi_ctrlr;
1175         struct hwi_async_pdu_context *pasync_ctx;
1176         struct async_pdu_handle *pasync_handle;
1177         struct list_head *pfree_link, *pbusy_list;
1178         struct phys_addr *pasync_sge;
1179         unsigned int ring_id, num_entries;
1180         unsigned int host_write_num;
1181         unsigned int writables;
1182         unsigned int i = 0;
1183         u32 doorbell = 0;
1184
1185         phwi_ctrlr = phba->phwi_ctrlr;
1186         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1187
1188         if (is_header) {
1189                 num_entries = pasync_ctx->async_header.num_entries;
1190                 writables = min(pasync_ctx->async_header.writables,
1191                                 pasync_ctx->async_header.free_entries);
1192                 pfree_link = pasync_ctx->async_header.free_list.next;
1193                 host_write_num = pasync_ctx->async_header.host_write_ptr;
1194                 ring_id = phwi_ctrlr->default_pdu_hdr.id;
1195         } else {
1196                 num_entries = pasync_ctx->async_data.num_entries;
1197                 writables = min(pasync_ctx->async_data.writables,
1198                                 pasync_ctx->async_data.free_entries);
1199                 pfree_link = pasync_ctx->async_data.free_list.next;
1200                 host_write_num = pasync_ctx->async_data.host_write_ptr;
1201                 ring_id = phwi_ctrlr->default_pdu_data.id;
1202         }
1203
1204         writables = (writables / 8) * 8;
1205         if (writables) {
1206                 for (i = 0; i < writables; i++) {
1207                         pbusy_list =
1208                             hwi_get_async_busy_list(pasync_ctx, is_header,
1209                                                     host_write_num);
1210                         pasync_handle =
1211                             list_entry(pfree_link, struct async_pdu_handle,
1212                                                                 link);
1213                         WARN_ON(!pasync_handle);
1214                         pasync_handle->consumed = 0;
1215
1216                         pfree_link = pfree_link->next;
1217
1218                         pasync_sge = hwi_get_ring_address(pasync_ctx,
1219                                                 is_header, host_write_num);
1220
1221                         pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1222                         pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1223
1224                         list_move(&pasync_handle->link, pbusy_list);
1225
1226                         host_write_num++;
1227                         host_write_num = host_write_num % num_entries;
1228                 }
1229
1230                 if (is_header) {
1231                         pasync_ctx->async_header.host_write_ptr =
1232                                                         host_write_num;
1233                         pasync_ctx->async_header.free_entries -= writables;
1234                         pasync_ctx->async_header.writables -= writables;
1235                         pasync_ctx->async_header.busy_entries += writables;
1236                 } else {
1237                         pasync_ctx->async_data.host_write_ptr = host_write_num;
1238                         pasync_ctx->async_data.free_entries -= writables;
1239                         pasync_ctx->async_data.writables -= writables;
1240                         pasync_ctx->async_data.busy_entries += writables;
1241                 }
1242
1243                 doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1244                 doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1245                 doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1246                 doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1247                                         << DB_DEF_PDU_CQPROC_SHIFT;
1248
1249                 iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1250         }
1251 }
1252
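/**
 * hwi_flush_default_pdu_buffer - Discard an unsolicited data buffer
 * @phba: The hba owning the ring
 * @beiscsi_conn: Connection the buffer belongs to
 * @pdpdu_cqe: The default PDU completion entry
 *
 * Returns the handles queued for this connection to the free lists and
 * reposts buffers to the default PDU data ring.
 */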
1253 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1254                                          struct beiscsi_conn *beiscsi_conn,
1255                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1256 {
1257         struct hwi_controller *phwi_ctrlr;
1258         struct hwi_async_pdu_context *pasync_ctx;
1259         struct async_pdu_handle *pasync_handle = NULL;
1260         unsigned int cq_index = -1;
1261
1262         phwi_ctrlr = phba->phwi_ctrlr;
1263         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1264
1265         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1266                                              pdpdu_cqe, &cq_index);
1267         BUG_ON(pasync_handle->is_header != 0);
1268         if (pasync_handle->consumed == 0)
1269                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1270                                            cq_index);
1271
1272         hwi_free_async_msg(phba, pasync_handle->cri);
1273         hwi_post_async_buffers(phba, pasync_handle->is_header);
1274 }
1275
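/**
 * hwi_fwd_async_msg - Assemble and forward an unsolicited PDU
 * @beiscsi_conn: Connection the PDU arrived on
 * @phba: The hba owning the connection
 * @pasync_ctx: Async PDU context
 * @cri: Connection resource index
 *
 * Takes the first handle on the wait queue as the PDU header, copies any
 * data buffers into one contiguous buffer and hands the result to
 * beiscsi_process_async_pdu(), freeing the handles on success.
 */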
1276 static unsigned int
1277 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1278                   struct beiscsi_hba *phba,
1279                   struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1280 {
1281         struct list_head *plist;
1282         struct async_pdu_handle *pasync_handle;
1283         void *phdr = NULL;
1284         unsigned int hdr_len = 0, buf_len = 0;
1285         unsigned int status, index = 0, offset = 0;
1286         void *pfirst_buffer = NULL;
1287         unsigned int num_buf = 0;
1288
1289         plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1290
1291         list_for_each_entry(pasync_handle, plist, link) {
1292                 if (index == 0) {
1293                         phdr = pasync_handle->pbuffer;
1294                         hdr_len = pasync_handle->buffer_len;
1295                 } else {
1296                         buf_len = pasync_handle->buffer_len;
1297                         if (!num_buf) {
1298                                 pfirst_buffer = pasync_handle->pbuffer;
1299                                 num_buf++;
1300                         }
1301                         memcpy(pfirst_buffer + offset,
1302                                pasync_handle->pbuffer, buf_len);
1303                         offset += buf_len;
1304                 }
1305                 index++;
1306         }
1307
1308         status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1309                                            (beiscsi_conn->beiscsi_conn_cid -
1310                                             phba->fw_config.iscsi_cid_start),
1311                                             phdr, hdr_len, pfirst_buffer,
1312                                             buf_len);
1313
1314         if (status == 0)
1315                 hwi_free_async_msg(phba, cri);
1316         return 0;
1317 }
1318
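/**
 * hwi_gather_async_pdu - queue header/data fragments of an async PDU
 *
 * Moves the handle onto the per-CRI wait queue.  For a header, the
 * expected data length is decoded from the PDU; for data, the received
 * byte count is accumulated.  Once all expected bytes have arrived the
 * PDU is forwarded via hwi_fwd_async_msg().
 */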
1319 static unsigned int
1320 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1321                      struct beiscsi_hba *phba,
1322                      struct async_pdu_handle *pasync_handle)
1323 {
1324         struct hwi_async_pdu_context *pasync_ctx;
1325         struct hwi_controller *phwi_ctrlr;
1326         unsigned int bytes_needed = 0, status = 0;
1327         unsigned short cri = pasync_handle->cri;
1328         struct pdu_base *ppdu;
1329
1330         phwi_ctrlr = phba->phwi_ctrlr;
1331         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1332
1333         list_del(&pasync_handle->link);
1334         if (pasync_handle->is_header) {
1335                 pasync_ctx->async_header.busy_entries--;
1336                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1337                         hwi_free_async_msg(phba, cri);
1338                         BUG();
1339                 }
1340
1341                 pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1342                 pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1343                 pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1344                                 (unsigned short)pasync_handle->buffer_len;
1345                 list_add_tail(&pasync_handle->link,
1346                               &pasync_ctx->async_entry[cri].wait_queue.list);
1347
1348                 ppdu = pasync_handle->pbuffer;
1349                 bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1350                         data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1351                         0xFFFF0000) | ((be16_to_cpu((ppdu->
1352                         dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1353                         & PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1354
1355                 if (status == 0) {
1356                         pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1357                             bytes_needed;
1358
1359                         if (bytes_needed == 0)
1360                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1361                                                            pasync_ctx, cri);
1362                 }
1363         } else {
1364                 pasync_ctx->async_data.busy_entries--;
1365                 if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1366                         list_add_tail(&pasync_handle->link,
1367                                       &pasync_ctx->async_entry[cri].wait_queue.
1368                                       list);
1369                         pasync_ctx->async_entry[cri].wait_queue.
1370                                 bytes_received +=
1371                                 (unsigned short)pasync_handle->buffer_len;
1372
1373                         if (pasync_ctx->async_entry[cri].wait_queue.
1374                             bytes_received >=
1375                             pasync_ctx->async_entry[cri].wait_queue.
1376                             bytes_needed)
1377                                 status = hwi_fwd_async_msg(beiscsi_conn, phba,
1378                                                            pasync_ctx, cri);
1379                 }
1380         }
1381         return status;
1382 }
1383
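/**
 * hwi_process_default_pdu_ring - handle an UNSOL_HDR/UNSOL_DATA completion
 *
 * Resolves the async handle for the CQE, updates the writable counters
 * when needed, gathers the PDU fragment and reposts free buffers to the
 * default PDU ring.
 */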
1384 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1385                                          struct beiscsi_hba *phba,
1386                                          struct i_t_dpdu_cqe *pdpdu_cqe)
1387 {
1388         struct hwi_controller *phwi_ctrlr;
1389         struct hwi_async_pdu_context *pasync_ctx;
1390         struct async_pdu_handle *pasync_handle = NULL;
1391         unsigned int cq_index = -1;
1392
1393         phwi_ctrlr = phba->phwi_ctrlr;
1394         pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1395         pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1396                                              pdpdu_cqe, &cq_index);
1397
1398         if (pasync_handle->consumed == 0)
1399                 hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1400                                            cq_index);
1401         hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1402         hwi_post_async_buffers(phba, pasync_handle->is_header);
1403 }
1404
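/**
 * beiscsi_process_mcc_isr - drain the MCC completion queue
 *
 * Dispatches async link-state events and MCC command completions,
 * ringing the CQ doorbell every 32 entries and rearming it once the
 * queue has been drained.
 */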
1405 static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1406 {
1407         struct be_queue_info *mcc_cq;
1408         struct  be_mcc_compl *mcc_compl;
1409         unsigned int num_processed = 0;
1410
1411         mcc_cq = &phba->ctrl.mcc_obj.cq;
1412         mcc_compl = queue_tail_node(mcc_cq);
1413         mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1414         while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1415
1416                 if (num_processed >= 32) {
1417                         hwi_ring_cq_db(phba, mcc_cq->id,
1418                                         num_processed, 0, 0);
1419                         num_processed = 0;
1420                 }
1421                 if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1422                         /* Interpret flags as an async trailer */
1423                         if (is_link_state_evt(mcc_compl->flags))
1424                                 /* Interpret compl as an async link evt */
1425                                 beiscsi_async_link_state_process(phba,
1426                                 (struct be_async_event_link_state *) mcc_compl);
1427                         else
1428                                 SE_DEBUG(DBG_LVL_1,
1429                                         " Unsupported Async Event, flags"
1430                                         " = 0x%08x \n", mcc_compl->flags);
1431                 } else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1432                         be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1433                         atomic_dec(&phba->ctrl.mcc_obj.q.used);
1434                 }
1435
1436                 mcc_compl->flags = 0;
1437                 queue_tail_inc(mcc_cq);
1438                 mcc_compl = queue_tail_node(mcc_cq);
1439                 mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1440                 num_processed++;
1441         }
1442
1443         if (num_processed > 0)
1444                 hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1445
1446 }
1447
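/**
 * beiscsi_process_cq - process solicited and unsolicited completions
 *
 * Walks the completion queue for this EQ, completing SCSI commands and
 * driver messages, feeding unsolicited header/data entries to the
 * default PDU ring handlers and failing the connection on fatal CQ
 * error codes.  Returns the total number of CQEs processed.
 */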
1448 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1449 {
1450         struct be_queue_info *cq;
1451         struct sol_cqe *sol;
1452         struct dmsg_cqe *dmsg;
1453         unsigned int num_processed = 0;
1454         unsigned int tot_nump = 0;
1455         struct beiscsi_conn *beiscsi_conn;
1456         struct beiscsi_endpoint *beiscsi_ep;
1457         struct iscsi_endpoint *ep;
1458         struct beiscsi_hba *phba;
1459
1460         cq = pbe_eq->cq;
1461         sol = queue_tail_node(cq);
1462         phba = pbe_eq->phba;
1463
1464         while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1465                CQE_VALID_MASK) {
1466                 be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1467
1468                 ep = phba->ep_array[(u32) ((sol->
1469                                    dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1470                                    SOL_CID_MASK) >> 6) -
1471                                    phba->fw_config.iscsi_cid_start];
1472
1473                 beiscsi_ep = ep->dd_data;
1474                 beiscsi_conn = beiscsi_ep->conn;
1475
1476                 if (num_processed >= 32) {
1477                         hwi_ring_cq_db(phba, cq->id,
1478                                         num_processed, 0, 0);
1479                         tot_nump += num_processed;
1480                         num_processed = 0;
1481                 }
1482
1483                 switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1484                         32] & CQE_CODE_MASK) {
1485                 case SOL_CMD_COMPLETE:
1486                         hwi_complete_cmd(beiscsi_conn, phba, sol);
1487                         break;
1488                 case DRIVERMSG_NOTIFY:
1489                         SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1490                         dmsg = (struct dmsg_cqe *)sol;
1491                         hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1492                         break;
1493                 case UNSOL_HDR_NOTIFY:
1494                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1495                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1496                                              (struct i_t_dpdu_cqe *)sol);
1497                         break;
1498                 case UNSOL_DATA_NOTIFY:
1499                         SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1500                         hwi_process_default_pdu_ring(beiscsi_conn, phba,
1501                                              (struct i_t_dpdu_cqe *)sol);
1502                         break;
1503                 case CXN_INVALIDATE_INDEX_NOTIFY:
1504                 case CMD_INVALIDATED_NOTIFY:
1505                 case CXN_INVALIDATE_NOTIFY:
1506                         SE_DEBUG(DBG_LVL_1,
1507                                  "Ignoring CQ Error notification for cmd/cxn "
1508                                  "invalidate\n");
1509                         break;
1510                 case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1511                 case CMD_KILLED_INVALID_STATSN_RCVD:
1512                 case CMD_KILLED_INVALID_R2T_RCVD:
1513                 case CMD_CXN_KILLED_LUN_INVALID:
1514                 case CMD_CXN_KILLED_ICD_INVALID:
1515                 case CMD_CXN_KILLED_ITT_INVALID:
1516                 case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1517                 case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1518                         SE_DEBUG(DBG_LVL_1,
1519                                  "CQ Error notification for cmd.. "
1520                                  "code %d cid 0x%x\n",
1521                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1522                                  32] & CQE_CODE_MASK,
1523                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1524                                  32] & SOL_CID_MASK));
1525                         break;
1526                 case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1527                         SE_DEBUG(DBG_LVL_1,
1528                                  "Digest error on def pdu ring, dropping..\n");
1529                         hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1530                                              (struct i_t_dpdu_cqe *) sol);
1531                         break;
1532                 case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1533                 case CXN_KILLED_BURST_LEN_MISMATCH:
1534                 case CXN_KILLED_AHS_RCVD:
1535                 case CXN_KILLED_HDR_DIGEST_ERR:
1536                 case CXN_KILLED_UNKNOWN_HDR:
1537                 case CXN_KILLED_STALE_ITT_TTT_RCVD:
1538                 case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1539                 case CXN_KILLED_TIMED_OUT:
1540                 case CXN_KILLED_FIN_RCVD:
1541                 case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1542                 case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1543                 case CXN_KILLED_OVER_RUN_RESIDUAL:
1544                 case CXN_KILLED_UNDER_RUN_RESIDUAL:
1545                 case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1546                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1547                                  "0x%x...\n",
1548                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1549                                  32] & CQE_CODE_MASK,
1550                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1551                                  32] & CQE_CID_MASK));
1552                         iscsi_conn_failure(beiscsi_conn->conn,
1553                                            ISCSI_ERR_CONN_FAILED);
1554                         break;
1555                 case CXN_KILLED_RST_SENT:
1556                 case CXN_KILLED_RST_RCVD:
1557                         SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
1558                                 "received/sent on CID 0x%x...\n",
1559                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1560                                  32] & CQE_CODE_MASK,
1561                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1562                                  32] & CQE_CID_MASK));
1563                         iscsi_conn_failure(beiscsi_conn->conn,
1564                                            ISCSI_ERR_CONN_FAILED);
1565                         break;
1566                 default:
1567                         SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
1568                                  "received on CID 0x%x...\n",
1569                                  sol->dw[offsetof(struct amap_sol_cqe, code) /
1570                                  32] & CQE_CODE_MASK,
1571                                  (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1572                                  32] & CQE_CID_MASK));
1573                         break;
1574                 }
1575
1576                 AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1577                 queue_tail_inc(cq);
1578                 sol = queue_tail_node(cq);
1579                 num_processed++;
1580         }
1581
1582         if (num_processed > 0) {
1583                 tot_nump += num_processed;
1584                 hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1585         }
1586         return tot_nump;
1587 }
1588
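/**
 * beiscsi_process_all_cqs - deferred (workqueue) CQ processing
 *
 * Services the MCC completion queue and/or the iSCSI completion queue
 * when the interrupt handler has flagged them via todo_mcc_cq/todo_cq.
 */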
1589 void beiscsi_process_all_cqs(struct work_struct *work)
1590 {
1591         unsigned long flags;
1592         struct hwi_controller *phwi_ctrlr;
1593         struct hwi_context_memory *phwi_context;
1594         struct be_eq_obj *pbe_eq;
1595         struct beiscsi_hba *phba =
1596             container_of(work, struct beiscsi_hba, work_cqs);
1597
1598         phwi_ctrlr = phba->phwi_ctrlr;
1599         phwi_context = phwi_ctrlr->phwi_ctxt;
1600         if (phba->msix_enabled)
1601                 pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1602         else
1603                 pbe_eq = &phwi_context->be_eq[0];
1604
1605         if (phba->todo_mcc_cq) {
1606                 spin_lock_irqsave(&phba->isr_lock, flags);
1607                 phba->todo_mcc_cq = 0;
1608                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1609                 beiscsi_process_mcc_isr(phba);
1610         }
1611
1612         if (phba->todo_cq) {
1613                 spin_lock_irqsave(&phba->isr_lock, flags);
1614                 phba->todo_cq = 0;
1615                 spin_unlock_irqrestore(&phba->isr_lock, flags);
1616                 beiscsi_process_cq(pbe_eq);
1617         }
1618 }
1619
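/**
 * be_iopoll - blk_iopoll callback
 *
 * Processes completions for this event queue's CQ; when fewer entries
 * than the budget were handled, the poll is completed and the EQ is
 * rearmed.
 */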
1620 static int be_iopoll(struct blk_iopoll *iop, int budget)
1621 {
1622         unsigned int ret;
1623         struct beiscsi_hba *phba;
1624         struct be_eq_obj *pbe_eq;
1625
1626         pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1627         ret = beiscsi_process_cq(pbe_eq);
1628         if (ret < budget) {
1629                 phba = pbe_eq->phba;
1630                 blk_iopoll_complete(iop);
1631                 SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1632                 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1633         }
1634         return ret;
1635 }
1636
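/**
 * hwi_write_sgl - program the WRB and SGL for a scatter-gather I/O task
 *
 * The first two scatterlist elements are written as inline SGEs in the
 * WRB itself; the full list is then written into the task's SGL
 * fragment (after the BHS entry), with the final SGE flagged as last.
 */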
1637 static void
1638 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1639               unsigned int num_sg, struct beiscsi_io_task *io_task)
1640 {
1641         struct iscsi_sge *psgl;
1642         unsigned short sg_len, index;
1643         unsigned int sge_len = 0;
1644         unsigned long long addr;
1645         struct scatterlist *l_sg;
1646         unsigned int offset;
1647
1648         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1649                                       io_task->bhs_pa.u.a32.address_lo);
1650         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1651                                       io_task->bhs_pa.u.a32.address_hi);
1652
1653         l_sg = sg;
1654         for (index = 0; (index < num_sg) && (index < 2); index++,
1655                                                          sg = sg_next(sg)) {
1656                 if (index == 0) {
1657                         sg_len = sg_dma_len(sg);
1658                         addr = (u64) sg_dma_address(sg);
1659                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1660                                                         (addr & 0xFFFFFFFF));
1661                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1662                                                         (addr >> 32));
1663                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1664                                                         sg_len);
1665                         sge_len = sg_len;
1666                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1667                                                         1);
1668                 } else {
1669                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1670                                                         0);
1671                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1672                                                         pwrb, sge_len);
1673                         sg_len = sg_dma_len(sg);
1674                         addr = (u64) sg_dma_address(sg);
1675                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1676                                                         (addr & 0xFFFFFFFF));
1677                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1678                                                         (addr >> 32));
1679                         AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1680                                                         sg_len);
1681                 }
1682         }
1683         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1684         memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1685
1686         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1687
1688         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1689                         io_task->bhs_pa.u.a32.address_hi);
1690         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1691                         io_task->bhs_pa.u.a32.address_lo);
1692
1693         if (num_sg == 2)
1694                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1695         sg = l_sg;
1696         psgl++;
1697         psgl++;
1698         offset = 0;
1699         for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1700                 sg_len = sg_dma_len(sg);
1701                 addr = (u64) sg_dma_address(sg);
1702                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1703                                                 (addr & 0xFFFFFFFF));
1704                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1705                                                 (addr >> 32));
1706                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1707                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1708                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1709                 offset += sg_len;
1710         }
1711         psgl--;
1712         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1713 }
1714
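/**
 * hwi_write_buffer - program the WRB and SGL for a non-I/O (mgmt) task
 *
 * Maps the task's immediate data for DMA when present and sets up the
 * BHS and data SGEs accordingly.
 */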
1715 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1716 {
1717         struct iscsi_sge *psgl;
1718         unsigned long long addr;
1719         struct beiscsi_io_task *io_task = task->dd_data;
1720         struct beiscsi_conn *beiscsi_conn = io_task->conn;
1721         struct beiscsi_hba *phba = beiscsi_conn->phba;
1722
1723         io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1724         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1725                                 io_task->bhs_pa.u.a32.address_lo);
1726         AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1727                                 io_task->bhs_pa.u.a32.address_hi);
1728
1729         if (task->data) {
1730                 if (task->data_count) {
1731                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1732                         addr = (u64) pci_map_single(phba->pcidev,
1733                                                     task->data,
1734                                                     task->data_count,
1734                                                     PCI_DMA_TODEVICE);
1735                 } else {
1736                         AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1737                         addr = 0;
1738                 }
1739                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1740                                                 (addr & 0xFFFFFFFF));
1741                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1742                                                 (addr >> 32));
1743                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1744                                                 task->data_count);
1745
1746                 AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1747         } else {
1748                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1749                 addr = 0;
1750         }
1751
1752         psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1753
1754         AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1755
1756         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1757                       io_task->bhs_pa.u.a32.address_hi);
1758         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1759                       io_task->bhs_pa.u.a32.address_lo);
1760         if (task->data) {
1761                 psgl++;
1762                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1763                 AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1764                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1765                 AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1766                 AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1767                 AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1768
1769                 psgl++;
1770                 if (task->data) {
1771                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1772                                                 (addr & 0xFFFFFFFF));
1773                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1774                                                 (addr >> 32));
1775                 }
1776                 AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1777         }
1778         AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1779 }
1780
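/**
 * beiscsi_find_mem_req - size all driver memory regions
 *
 * Fills phba->mem_req[] with the number of bytes needed for WRBs and
 * WRB handles, SGL handles and SGEs, and the default PDU header/data
 * buffers, rings and handles.
 */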
1781 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1782 {
1783         unsigned int num_cq_pages, num_async_pdu_buf_pages;
1784         unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1785         unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1786
1787         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1788                                       sizeof(struct sol_cqe));
1789         num_async_pdu_buf_pages =
1790                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1791                                        phba->params.defpdu_hdr_sz);
1792         num_async_pdu_buf_sgl_pages =
1793                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1794                                        sizeof(struct phys_addr));
1795         num_async_pdu_data_pages =
1796                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1797                                        phba->params.defpdu_data_sz);
1798         num_async_pdu_data_sgl_pages =
1799                         PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1800                                        sizeof(struct phys_addr));
1801
1802         phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1803
1804         phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1805                                                  BE_ISCSI_PDU_HEADER_SIZE;
1806         phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1807                                             sizeof(struct hwi_context_memory);
1808
1809
1810         phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1811             * (phba->params.wrbs_per_cxn)
1812             * phba->params.cxns_per_ctrl;
1813         wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1814                                  (phba->params.wrbs_per_cxn);
1815         phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1816                                 phba->params.cxns_per_ctrl);
1817
1818         phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1819                 phba->params.icds_per_ctrl;
1820         phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1821                 phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1822
1823         phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1824                 num_async_pdu_buf_pages * PAGE_SIZE;
1825         phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1826                 num_async_pdu_data_pages * PAGE_SIZE;
1827         phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1828                 num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1829         phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1830                 num_async_pdu_data_sgl_pages * PAGE_SIZE;
1831         phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1832                 phba->params.asyncpdus_per_ctrl *
1833                 sizeof(struct async_pdu_handle);
1834         phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1835                 phba->params.asyncpdus_per_ctrl *
1836                 sizeof(struct async_pdu_handle);
1837         phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1838                 sizeof(struct hwi_async_pdu_context) +
1839                 (phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1840 }
1841
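/**
 * beiscsi_alloc_mem - allocate all regions described by phba->mem_req[]
 *
 * Each region is allocated as one or more DMA-coherent fragments; when
 * an allocation fails the request size is reduced (rounded down to a
 * power of two or halved) until BE_MIN_MEM_SIZE is reached.  On error
 * everything allocated so far is freed.
 */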
1842 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1843 {
1844         struct be_mem_descriptor *mem_descr;
1845         dma_addr_t bus_add;
1846         struct mem_array *mem_arr, *mem_arr_orig;
1847         unsigned int i, j, alloc_size, curr_alloc_size;
1848
1849         phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1850         if (!phba->phwi_ctrlr)
1851                 return -ENOMEM;
1852
1853         phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1854                                  GFP_KERNEL);
1855         if (!phba->init_mem) {
1856                 kfree(phba->phwi_ctrlr);
1857                 return -ENOMEM;
1858         }
1859
1860         mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1861                                GFP_KERNEL);
1862         if (!mem_arr_orig) {
1863                 kfree(phba->init_mem);
1864                 kfree(phba->phwi_ctrlr);
1865                 return -ENOMEM;
1866         }
1867
1868         mem_descr = phba->init_mem;
1869         for (i = 0; i < SE_MEM_MAX; i++) {
1870                 j = 0;
1871                 mem_arr = mem_arr_orig;
1872                 alloc_size = phba->mem_req[i];
1873                 memset(mem_arr, 0, sizeof(struct mem_array) *
1874                        BEISCSI_MAX_FRAGS_INIT);
1875                 curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1876                 do {
1877                         mem_arr->virtual_address = pci_alloc_consistent(
1878                                                         phba->pcidev,
1879                                                         curr_alloc_size,
1880                                                         &bus_add);
1881                         if (!mem_arr->virtual_address) {
1882                                 if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1883                                         goto free_mem;
1884                                 if (curr_alloc_size -
1885                                         rounddown_pow_of_two(curr_alloc_size))
1886                                         curr_alloc_size = rounddown_pow_of_two
1887                                                              (curr_alloc_size);
1888                                 else
1889                                         curr_alloc_size = curr_alloc_size / 2;
1890                         } else {
1891                                 mem_arr->bus_address.u.
1892                                     a64.address = (__u64) bus_add;
1893                                 mem_arr->size = curr_alloc_size;
1894                                 alloc_size -= curr_alloc_size;
1895                                 curr_alloc_size = min(be_max_phys_size *
1896                                                       1024, alloc_size);
1897                                 j++;
1898                                 mem_arr++;
1899                         }
1900                 } while (alloc_size);
1901                 mem_descr->num_elements = j;
1902                 mem_descr->size_in_bytes = phba->mem_req[i];
1903                 mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1904                                                GFP_KERNEL);
1905                 if (!mem_descr->mem_array)
1906                         goto free_mem;
1907
1908                 memcpy(mem_descr->mem_array, mem_arr_orig,
1909                        sizeof(struct mem_array) * j);
1910                 mem_descr++;
1911         }
1912         kfree(mem_arr_orig);
1913         return 0;
1914 free_mem:
1915         mem_descr->num_elements = j;
1916         while ((i) || (j)) {
1917                 for (j = mem_descr->num_elements; j > 0; j--) {
1918                         pci_free_consistent(phba->pcidev,
1919                                             mem_descr->mem_array[j - 1].size,
1920                                             mem_descr->mem_array[j - 1].
1921                                             virtual_address,
1922                                             mem_descr->mem_array[j - 1].
1923                                             bus_address.u.a64.address);
1924                 }
1925                 if (i) {
1926                         i--;
1927                         kfree(mem_descr->mem_array);
1928                         mem_descr--;
1929                 }
1930         }
1931         kfree(mem_arr_orig);
1932         kfree(phba->init_mem);
1933         kfree(phba->phwi_ctrlr);
1934         return -ENOMEM;
1935 }
1936
1937 static int beiscsi_get_memory(struct beiscsi_hba *phba)
1938 {
1939         beiscsi_find_mem_req(phba);
1940         return beiscsi_alloc_mem(phba);
1941 }
1942
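/* Initialize the template Data-Out and NOP-Out PDU headers. */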
1943 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1944 {
1945         struct pdu_data_out *pdata_out;
1946         struct pdu_nop_out *pnop_out;
1947         struct be_mem_descriptor *mem_descr;
1948
1949         mem_descr = phba->init_mem;
1950         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1951         pdata_out =
1952             (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1953         memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1954
1955         AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1956                       IIOC_SCSI_DATA);
1957
1958         pnop_out =
1959             (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1960                                    virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1961
1962         memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1963         AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1964         AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1965         AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1966 }
1967
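/**
 * beiscsi_init_wrb_handle - carve up the WRB and WRB-handle regions
 *
 * Builds the per-connection wrb_context handle arrays and links every
 * WRB handle to its iscsi_wrb in the HWI_MEM_WRB region.
 */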
1968 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1969 {
1970         struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1971         struct wrb_handle *pwrb_handle;
1972         struct hwi_controller *phwi_ctrlr;
1973         struct hwi_wrb_context *pwrb_context;
1974         struct iscsi_wrb *pwrb;
1975         unsigned int num_cxn_wrbh;
1976         unsigned int num_cxn_wrb, j, idx, index;
1977
1978         mem_descr_wrbh = phba->init_mem;
1979         mem_descr_wrbh += HWI_MEM_WRBH;
1980
1981         mem_descr_wrb = phba->init_mem;
1982         mem_descr_wrb += HWI_MEM_WRB;
1983
1984         idx = 0;
1985         pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
1986         num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
1987                         ((sizeof(struct wrb_handle)) *
1988                          phba->params.wrbs_per_cxn));
1989         phwi_ctrlr = phba->phwi_ctrlr;
1990
1991         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
1992                 pwrb_context = &phwi_ctrlr->wrb_context[index];
1993                 pwrb_context->pwrb_handle_base =
1994                                 kzalloc(sizeof(struct wrb_handle *) *
1995                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
1996                 pwrb_context->pwrb_handle_basestd =
1997                                 kzalloc(sizeof(struct wrb_handle *) *
1998                                         phba->params.wrbs_per_cxn, GFP_KERNEL);
1999                 if (num_cxn_wrbh) {
2000                         pwrb_context->alloc_index = 0;
2001                         pwrb_context->wrb_handles_available = 0;
2002                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2003                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2004                                 pwrb_context->pwrb_handle_basestd[j] =
2005                                                                 pwrb_handle;
2006                                 pwrb_context->wrb_handles_available++;
2007                                 pwrb_handle->wrb_index = j;
2008                                 pwrb_handle++;
2009                         }
2010                         pwrb_context->free_index = 0;
2011                         num_cxn_wrbh--;
2012                 } else {
2013                         idx++;
2014                         pwrb_handle =
2015                             mem_descr_wrbh->mem_array[idx].virtual_address;
2016                         num_cxn_wrbh =
2017                             ((mem_descr_wrbh->mem_array[idx].size) /
2018                              ((sizeof(struct wrb_handle)) *
2019                               phba->params.wrbs_per_cxn));
2020                         pwrb_context->alloc_index = 0;
2021                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2022                                 pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2023                                 pwrb_context->pwrb_handle_basestd[j] =
2024                                     pwrb_handle;
2025                                 pwrb_context->wrb_handles_available++;
2026                                 pwrb_handle->wrb_index = j;
2027                                 pwrb_handle++;
2028                         }
2029                         pwrb_context->free_index = 0;
2030                         num_cxn_wrbh--;
2031                 }
2032         }
2033         idx = 0;
2034         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2035         num_cxn_wrb =
2036             ((mem_descr_wrb->mem_array[idx].size) /
2037              ((sizeof(struct iscsi_wrb)) * phba->params.wrbs_per_cxn));
2038
2039         for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
2040                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2041                 if (num_cxn_wrb) {
2042                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2043                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2044                                 pwrb_handle->pwrb = pwrb;
2045                                 pwrb++;
2046                         }
2047                         num_cxn_wrb--;
2048                 } else {
2049                         idx++;
2050                         pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2051                         num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2052                                         ((sizeof(struct iscsi_wrb)) *
2053                                          phba->params.wrbs_per_cxn));
2054                         for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2055                                 pwrb_handle = pwrb_context->pwrb_handle_base[j];
2056                                 pwrb_handle->pwrb = pwrb;
2057                                 pwrb++;
2058                         }
2059                         num_cxn_wrb--;
2060                 }
2061         }
2062 }
2063
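/**
 * hwi_init_async_pdu_ctx - set up the default PDU context
 *
 * Wires the async header/data buffer pools, rings and handle arrays
 * into the context and places every handle on the corresponding free
 * list.
 */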
2064 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2065 {
2066         struct hwi_controller *phwi_ctrlr;
2067         struct hba_parameters *p = &phba->params;
2068         struct hwi_async_pdu_context *pasync_ctx;
2069         struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2070         unsigned int index;
2071         struct be_mem_descriptor *mem_descr;
2072
2073         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2074         mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2075
2076         phwi_ctrlr = phba->phwi_ctrlr;
2077         phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2078                                 mem_descr->mem_array[0].virtual_address;
2079         pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2080         memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2081
2082         pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2083         pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2084         pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2085         pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2086
2087         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2088         mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2089         if (mem_descr->mem_array[0].virtual_address) {
2090                 SE_DEBUG(DBG_LVL_8,
2091                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2092                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2093         } else
2094                 shost_printk(KERN_WARNING, phba->shost,
2095                              "No Virtual address \n");
2096
2097         pasync_ctx->async_header.va_base =
2098                         mem_descr->mem_array[0].virtual_address;
2099
2100         pasync_ctx->async_header.pa_base.u.a64.address =
2101                         mem_descr->mem_array[0].bus_address.u.a64.address;
2102
2103         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2104         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2105         if (mem_descr->mem_array[0].virtual_address) {
2106                 SE_DEBUG(DBG_LVL_8,
2107                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2108                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2109         } else
2110                 shost_printk(KERN_WARNING, phba->shost,
2111                             "No Virtual address \n");
2112         pasync_ctx->async_header.ring_base =
2113                         mem_descr->mem_array[0].virtual_address;
2114
2115         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2116         mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2117         if (mem_descr->mem_array[0].virtual_address) {
2118                 SE_DEBUG(DBG_LVL_8,
2119                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2120                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2121         } else
2122                 shost_printk(KERN_WARNING, phba->shost,
2123                             "No Virtual address \n");
2124
2125         pasync_ctx->async_header.handle_base =
2126                         mem_descr->mem_array[0].virtual_address;
2127         pasync_ctx->async_header.writables = 0;
2128         INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2129
2130         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2131         mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2132         if (mem_descr->mem_array[0].virtual_address) {
2133                 SE_DEBUG(DBG_LVL_8,
2134                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2135                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2136         } else
2137                 shost_printk(KERN_WARNING, phba->shost,
2138                             "No Virtual address \n");
2139         pasync_ctx->async_data.va_base =
2140                         mem_descr->mem_array[0].virtual_address;
2141         pasync_ctx->async_data.pa_base.u.a64.address =
2142                         mem_descr->mem_array[0].bus_address.u.a64.address;
2143
2144         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2145         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2146         if (mem_descr->mem_array[0].virtual_address) {
2147                 SE_DEBUG(DBG_LVL_8,
2148                          "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2149                          " va=%p\n", mem_descr->mem_array[0].virtual_address);
2150         } else
2151                 shost_printk(KERN_WARNING, phba->shost,
2152                              "No Virtual address \n");
2153
2154         pasync_ctx->async_data.ring_base =
2155                         mem_descr->mem_array[0].virtual_address;
2156
2157         mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2158         mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2159         if (!mem_descr->mem_array[0].virtual_address)
2160                 shost_printk(KERN_WARNING, phba->shost,
2161                             "No Virtual address \n");
2162
2163         pasync_ctx->async_data.handle_base =
2164                         mem_descr->mem_array[0].virtual_address;
2165         pasync_ctx->async_data.writables = 0;
2166         INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2167
2168         pasync_header_h =
2169                 (struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2170         pasync_data_h =
2171                 (struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2172
2173         for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2174                 pasync_header_h->cri = -1;
2175                 pasync_header_h->index = (char)index;
2176                 INIT_LIST_HEAD(&pasync_header_h->link);
2177                 pasync_header_h->pbuffer =
2178                         (void *)((unsigned long)
2179                         (pasync_ctx->async_header.va_base) +
2180                         (p->defpdu_hdr_sz * index));
2181
2182                 pasync_header_h->pa.u.a64.address =
2183                         pasync_ctx->async_header.pa_base.u.a64.address +
2184                         (p->defpdu_hdr_sz * index);
2185
2186                 list_add_tail(&pasync_header_h->link,
2187                                 &pasync_ctx->async_header.free_list);
2188                 pasync_header_h++;
2189                 pasync_ctx->async_header.free_entries++;
2190                 pasync_ctx->async_header.writables++;
2191
2192                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2193                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2194                                header_busy_list);
2195                 pasync_data_h->cri = -1;
2196                 pasync_data_h->index = (char)index;
2197                 INIT_LIST_HEAD(&pasync_data_h->link);
2198                 pasync_data_h->pbuffer =
2199                         (void *)((unsigned long)
2200                         (pasync_ctx->async_data.va_base) +
2201                         (p->defpdu_data_sz * index));
2202
2203                 pasync_data_h->pa.u.a64.address =
2204                     pasync_ctx->async_data.pa_base.u.a64.address +
2205                     (p->defpdu_data_sz * index);
2206
2207                 list_add_tail(&pasync_data_h->link,
2208                               &pasync_ctx->async_data.free_list);
2209                 pasync_data_h++;
2210                 pasync_ctx->async_data.free_entries++;
2211                 pasync_ctx->async_data.writables++;
2212
2213                 INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2214         }
2215
2216         pasync_ctx->async_header.host_write_ptr = 0;
2217         pasync_ctx->async_header.ep_read_ptr = -1;
2218         pasync_ctx->async_data.host_write_ptr = 0;
2219         pasync_ctx->async_data.ep_read_ptr = -1;
2220 }
2221
2222 static int
2223 be_sgl_create_contiguous(void *virtual_address,
2224                          u64 physical_address, u32 length,
2225                          struct be_dma_mem *sgl)
2226 {
2227         WARN_ON(!virtual_address);
2228         WARN_ON(!physical_address);
2229         WARN_ON(length == 0);
2230         WARN_ON(!sgl);
2231
2232         sgl->va = virtual_address;
2233         sgl->dma = physical_address;
2234         sgl->size = length;
2235
2236         return 0;
2237 }
2238
2239 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2240 {
2241         memset(sgl, 0, sizeof(*sgl));
2242 }
2243
2244 static void
2245 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2246                      struct mem_array *pmem, struct be_dma_mem *sgl)
2247 {
2248         if (sgl->va)
2249                 be_sgl_destroy_contiguous(sgl);
2250
2251         be_sgl_create_contiguous(pmem->virtual_address,
2252                                  pmem->bus_address.u.a64.address,
2253                                  pmem->size, sgl);
2254 }
2255
2256 static void
2257 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2258                            struct mem_array *pmem, struct be_dma_mem *sgl)
2259 {
2260         if (sgl->va)
2261                 be_sgl_destroy_contiguous(sgl);
2262
2263         be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2264                                  pmem->bus_address.u.a64.address,
2265                                  pmem->size, sgl);
2266 }
2267
2268 static int be_fill_queue(struct be_queue_info *q,
2269                 u16 len, u16 entry_size, void *vaddress)
2270 {
2271         struct be_dma_mem *mem = &q->dma_mem;
2272
2273         memset(q, 0, sizeof(*q));
2274         q->len = len;
2275         q->entry_size = entry_size;
2276         mem->size = len * entry_size;
2277         mem->va = vaddress;
2278         if (!mem->va)
2279                 return -ENOMEM;
2280         memset(mem->va, 0, mem->size);
2281         return 0;
2282 }
2283
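/*
 * Allocate and create one event queue per CPU (plus one for MCC when
 * MSI-X is enabled).
 */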
2284 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2285                              struct hwi_context_memory *phwi_context)
2286 {
2287         unsigned int i, num_eq_pages;
2288         int ret, eq_for_mcc;
2289         struct be_queue_info *eq;
2290         struct be_dma_mem *mem;
2291         void *eq_vaddress;
2292         dma_addr_t paddr;
2293
2294         num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2295                                       sizeof(struct be_eq_entry));
2296
2297         if (phba->msix_enabled)
2298                 eq_for_mcc = 1;
2299         else
2300                 eq_for_mcc = 0;
2301         for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2302                 eq = &phwi_context->be_eq[i].q;
2303                 mem = &eq->dma_mem;
2304                 phwi_context->be_eq[i].phba = phba;
2305                 eq_vaddress = pci_alloc_consistent(phba->pcidev,
2306                                                      num_eq_pages * PAGE_SIZE,
2307                                                      &paddr);
2308                 if (!eq_vaddress)
2309                         goto create_eq_error;
2310
2311                 mem->va = eq_vaddress;
2312                 ret = be_fill_queue(eq, phba->params.num_eq_entries,
2313                                     sizeof(struct be_eq_entry), eq_vaddress);
2314                 if (ret) {
2315                         shost_printk(KERN_ERR, phba->shost,
2316                                      "be_fill_queue Failed for EQ \n");
2317                         goto create_eq_error;
2318                 }
2319
2320                 mem->dma = paddr;
2321                 ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2322                                             phwi_context->cur_eqd);
2323                 if (ret) {
2324                         shost_printk(KERN_ERR, phba->shost,
2325                                      "beiscsi_cmd_eq_create"
2326                                      " Failed for EQ\n");
2327                         goto create_eq_error;
2328                 }
2329                 SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2330         }
2331         return 0;
2332 create_eq_error:
2333         for (i = 0; i < (phba->num_cpus + 1); i++) {
2334                 eq = &phwi_context->be_eq[i].q;
2335                 mem = &eq->dma_mem;
2336                 if (mem->va)
2337                         pci_free_consistent(phba->pcidev, num_eq_pages
2338                                             * PAGE_SIZE,
2339                                             mem->va, mem->dma);
2340         }
2341         return ret;
2342 }
2343
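/* Allocate and create one solicited completion queue per CPU, bound to its EQ. */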
2344 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2345                              struct hwi_context_memory *phwi_context)
2346 {
2347         unsigned int i, num_cq_pages;
2348         int ret;
2349         struct be_queue_info *cq, *eq;
2350         struct be_dma_mem *mem;
2351         struct be_eq_obj *pbe_eq;
2352         void *cq_vaddress;
2353         dma_addr_t paddr;
2354
2355         num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2356                                       sizeof(struct sol_cqe));
2357
2358         for (i = 0; i < phba->num_cpus; i++) {
2359                 cq = &phwi_context->be_cq[i];
2360                 eq = &phwi_context->be_eq[i].q;
2361                 pbe_eq = &phwi_context->be_eq[i];
2362                 pbe_eq->cq = cq;
2363                 pbe_eq->phba = phba;
2364                 mem = &cq->dma_mem;
2365                 cq_vaddress = pci_alloc_consistent(phba->pcidev,
2366                                                      num_cq_pages * PAGE_SIZE,
2367                                                      &paddr);
2368                 if (!cq_vaddress)
2369                         goto create_cq_error;
2370                 ret = be_fill_queue(cq, phba->params.num_cq_entries,
2371                                     sizeof(struct sol_cqe), cq_vaddress);
2372                 if (ret) {
2373                         shost_printk(KERN_ERR, phba->shost,
2374                                      "be_fill_queue Failed for ISCSI CQ \n");
2375                         goto create_cq_error;
2376                 }
2377
2378                 mem->dma = paddr;
2379                 ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2380                                             false, 0);
2381                 if (ret) {
2382                         shost_printk(KERN_ERR, phba->shost,
2383                                      "beiscsi_cmd_cq_create"
2384                                      " Failed for ISCSI CQ\n");
2385                         goto create_cq_error;
2386                 }
2387                 SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2388                                                  cq->id, eq->id);
2389                 SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2390         }
2391         return 0;
2392
2393 create_cq_error:
2394         for (i = 0; i < phba->num_cpus; i++) {
2395                 cq = &phwi_context->be_cq[i];
2396                 mem = &cq->dma_mem;
2397                 if (mem->va)
2398                         pci_free_consistent(phba->pcidev, num_cq_pages
2399                                             * PAGE_SIZE,
2400                                             mem->va, mem->dma);
2401         }
2402         return ret;
2403
2404 }
2405
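/*
 * Create the default PDU header ring on CQ 0 and post the initial
 * header buffers.
 */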
2406 static int
2407 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2408                        struct hwi_context_memory *phwi_context,
2409                        struct hwi_controller *phwi_ctrlr,
2410                        unsigned int def_pdu_ring_sz)
2411 {
2412         unsigned int idx;
2413         int ret;
2414         struct be_queue_info *dq, *cq;
2415         struct be_dma_mem *mem;
2416         struct be_mem_descriptor *mem_descr;
2417         void *dq_vaddress;
2418
2419         idx = 0;
2420         dq = &phwi_context->be_def_hdrq;
2421         cq = &phwi_context->be_cq[0];
2422         mem = &dq->dma_mem;
2423         mem_descr = phba->init_mem;
2424         mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2425         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2426         ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2427                             sizeof(struct phys_addr),
2428                             sizeof(struct phys_addr), dq_vaddress);
2429         if (ret) {
2430                 shost_printk(KERN_ERR, phba->shost,
2431                              "be_fill_queue Failed for DEF PDU HDR\n");
2432                 return ret;
2433         }
2434         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2435         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2436                                               def_pdu_ring_sz,
2437                                               phba->params.defpdu_hdr_sz);
2438         if (ret) {
2439                 shost_printk(KERN_ERR, phba->shost,
2440                              "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2441                 return ret;
2442         }
2443         phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2444         SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2445                  phwi_context->be_def_hdrq.id);
2446         hwi_post_async_buffers(phba, 1);
2447         return 0;
2448 }
2449
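/*
 * Create the default PDU data ring on CQ 0 and post the initial data
 * buffers.
 */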
2450 static int
2451 beiscsi_create_def_data(struct beiscsi_hba *phba,
2452                         struct hwi_context_memory *phwi_context,
2453                         struct hwi_controller *phwi_ctrlr,
2454                         unsigned int def_pdu_ring_sz)
2455 {
2456         unsigned int idx;
2457         int ret;
2458         struct be_queue_info *dataq, *cq;
2459         struct be_dma_mem *mem;
2460         struct be_mem_descriptor *mem_descr;
2461         void *dq_vaddress;
2462
2463         idx = 0;
2464         dataq = &phwi_context->be_def_dataq;
2465         cq = &phwi_context->be_cq[0];
2466         mem = &dataq->dma_mem;
2467         mem_descr = phba->init_mem;
2468         mem_descr += HWI_MEM_ASYNC_DATA_RING;
2469         dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2470         ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2471                             sizeof(struct phys_addr),
2472                             sizeof(struct phys_addr), dq_vaddress);
2473         if (ret) {
2474                 shost_printk(KERN_ERR, phba->shost,
2475                              "be_fill_queue Failed for DEF PDU DATA\n");
2476                 return ret;
2477         }
2478         mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2479         ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2480                                               def_pdu_ring_sz,
2481                                               phba->params.defpdu_data_sz);
2482         if (ret) {
2483                 shost_printk(KERN_ERR, phba->shost,
2484                              "be_cmd_create_default_pdu_queue Failed"
2485                              " for DEF PDU DATA\n");
2486                 return ret;
2487         }
2488         phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2489         SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2490                  phwi_context->be_def_dataq.id);
2491         hwi_post_async_buffers(phba, 0);
2492         SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2493         return 0;
2494 }
2495
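     /*
      * Post the SGE pages described by HWI_MEM_SGE to the adapter, one
      * mem_array element at a time, starting at the page offset that
      * corresponds to the first ICD owned by the host.
      */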
2496 static int
2497 beiscsi_post_pages(struct beiscsi_hba *phba)
2498 {
2499         struct be_mem_descriptor *mem_descr;
2500         struct mem_array *pm_arr;
2501         unsigned int page_offset, i;
2502         struct be_dma_mem sgl;
2503         int status;
2504
2505         mem_descr = phba->init_mem;
2506         mem_descr += HWI_MEM_SGE;
2507         pm_arr = mem_descr->mem_array;
2508
2509         page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2510                         phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2511         for (i = 0; i < mem_descr->num_elements; i++) {
2512                 hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2513                 status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2514                                                 page_offset,
2515                                                 (pm_arr->size / PAGE_SIZE));
2516                 page_offset += pm_arr->size / PAGE_SIZE;
2517                 if (status != 0) {
2518                         shost_printk(KERN_ERR, phba->shost,
2519                                      "post sgl failed.\n");
2520                         return status;
2521                 }
2522                 pm_arr++;
2523         }
2524         SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2525         return 0;
2526 }
2527
2528 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2529 {
2530         struct be_dma_mem *mem = &q->dma_mem;
2531         if (mem->va)
2532                 pci_free_consistent(phba->pcidev, mem->size,
2533                         mem->va, mem->dma);
2534 }
2535
2536 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2537                 u16 len, u16 entry_size)
2538 {
2539         struct be_dma_mem *mem = &q->dma_mem;
2540
2541         memset(q, 0, sizeof(*q));
2542         q->len = len;
2543         q->entry_size = entry_size;
2544         mem->size = len * entry_size;
2545         mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2546         if (!mem->va)
2547                 return -ENOMEM;
2548         memset(mem->va, 0, mem->size);
2549         return 0;
2550 }
2551
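     /*
      * Carve the HWI_MEM_WRB region into one WRB ring per connection,
      * moving on to the next mem_array element when the current one is
      * exhausted, and issue a WRBQ create command for each ring.
      */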
2552 static int
2553 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2554                          struct hwi_context_memory *phwi_context,
2555                          struct hwi_controller *phwi_ctrlr)
2556 {
2557         unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2558         u64 pa_addr_lo;
2559         unsigned int idx, num, i;
2560         struct mem_array *pwrb_arr;
2561         void *wrb_vaddr;
2562         struct be_dma_mem sgl;
2563         struct be_mem_descriptor *mem_descr;
2564         int status;
2565
2566         idx = 0;
2567         mem_descr = phba->init_mem;
2568         mem_descr += HWI_MEM_WRB;
2569         pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2570                            GFP_KERNEL);
2571         if (!pwrb_arr) {
2572                 shost_printk(KERN_ERR, phba->shost,
2573                              "Memory alloc failed in create wrb ring.\n");
2574                 return -ENOMEM;
2575         }
2576         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2577         pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2578         num_wrb_rings = mem_descr->mem_array[idx].size /
2579                 (phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2580
2581         for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2582                 if (num_wrb_rings) {
2583                         pwrb_arr[num].virtual_address = wrb_vaddr;
2584                         pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2585                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2586                                             sizeof(struct iscsi_wrb);
2587                         wrb_vaddr += pwrb_arr[num].size;
2588                         pa_addr_lo += pwrb_arr[num].size;
2589                         num_wrb_rings--;
2590                 } else {
2591                         idx++;
2592                         wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2593                         pa_addr_lo = mem_descr->mem_array[idx].
2594                                         bus_address.u.a64.address;
2595                         num_wrb_rings = mem_descr->mem_array[idx].size /
2596                                         (phba->params.wrbs_per_cxn *
2597                                         sizeof(struct iscsi_wrb));
2598                         pwrb_arr[num].virtual_address = wrb_vaddr;
2599                         pwrb_arr[num].bus_address.u.a64.address =
2600                                                 pa_addr_lo;
2601                         pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2602                                                  sizeof(struct iscsi_wrb);
2603                         wrb_vaddr += pwrb_arr[num].size;
2604                         pa_addr_lo += pwrb_arr[num].size;
2605                         num_wrb_rings--;
2606                 }
2607         }
2608         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2609                 wrb_mem_index = 0;
2610                 offset = 0;
2611                 size = 0;
2612
2613                 hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2614                 status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2615                                             &phwi_context->be_wrbq[i]);
2616                 if (status != 0) {
2617                         shost_printk(KERN_ERR, phba->shost,
2618                                      "wrbq create failed.\n");
                             kfree(pwrb_arr);
2619                         return status;
2620                 }
2621                 phwi_ctrlr->wrb_context[i * 2].cid =
2622                                                 phwi_context->be_wrbq[i].id;
2623         }
2624         kfree(pwrb_arr);
2625         return 0;
2626 }
2627
2628 static void free_wrb_handles(struct beiscsi_hba *phba)
2629 {
2630         unsigned int index;
2631         struct hwi_controller *phwi_ctrlr;
2632         struct hwi_wrb_context *pwrb_context;
2633
2634         phwi_ctrlr = phba->phwi_ctrlr;
2635         for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2636                 pwrb_context = &phwi_ctrlr->wrb_context[index];
2637                 kfree(pwrb_context->pwrb_handle_base);
2638                 kfree(pwrb_context->pwrb_handle_basestd);
2639         }
2640 }
2641
2642 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2643 {
2644         struct be_queue_info *q;
2645         struct be_ctrl_info *ctrl = &phba->ctrl;
2646
2647         q = &phba->ctrl.mcc_obj.q;
2648         if (q->created)
2649                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2650         be_queue_free(phba, q);
2651
2652         q = &phba->ctrl.mcc_obj.cq;
2653         if (q->created)
2654                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2655         be_queue_free(phba, q);
2656 }
2657
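     /*
      * Undo everything hwi_init_port() created: WRB queues and handles,
      * the default PDU queues, posted SGLs, completion queues, event
      * queues and finally the MCC queues.
      */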
2658 static void hwi_cleanup(struct beiscsi_hba *phba)
2659 {
2660         struct be_queue_info *q;
2661         struct be_ctrl_info *ctrl = &phba->ctrl;
2662         struct hwi_controller *phwi_ctrlr;
2663         struct hwi_context_memory *phwi_context;
2664         int i, eq_num;
2665
2666         phwi_ctrlr = phba->phwi_ctrlr;
2667         phwi_context = phwi_ctrlr->phwi_ctxt;
2668         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2669                 q = &phwi_context->be_wrbq[i];
2670                 if (q->created)
2671                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2672         }
2673         free_wrb_handles(phba);
2674
2675         q = &phwi_context->be_def_hdrq;
2676         if (q->created)
2677                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2678
2679         q = &phwi_context->be_def_dataq;
2680         if (q->created)
2681                 beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2682
2683         beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2684
2685         for (i = 0; i < (phba->num_cpus); i++) {
2686                 q = &phwi_context->be_cq[i];
2687                 if (q->created)
2688                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2689         }
2690         if (phba->msix_enabled)
2691                 eq_num = 1;
2692         else
2693                 eq_num = 0;
2694         for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2695                 q = &phwi_context->be_eq[i].q;
2696                 if (q->created)
2697                         beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2698         }
2699         be_mcc_queues_destroy(phba);
2700 }
2701
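     /*
      * Allocate and create the MCC completion queue (bound to the extra
      * EQ when MSI-X is enabled, EQ 0 otherwise) and then the MCC queue
      * itself, unwinding on any failure.
      */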
2702 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2703                                 struct hwi_context_memory *phwi_context)
2704 {
2705         struct be_queue_info *q, *cq;
2706         struct be_ctrl_info *ctrl = &phba->ctrl;
2707
2708         /* Alloc MCC compl queue */
2709         cq = &phba->ctrl.mcc_obj.cq;
2710         if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2711                         sizeof(struct be_mcc_compl)))
2712                 goto err;
2713         /* Ask BE to create MCC compl queue; */
2714         if (phba->msix_enabled) {
2715                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2716                                          [phba->num_cpus].q, false, true, 0))
2717                         goto mcc_cq_free;
2718         } else {
2719                 if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2720                                           false, true, 0))
2721                         goto mcc_cq_free;
2722         }
2723
2724         /* Alloc MCC queue */
2725         q = &phba->ctrl.mcc_obj.q;
2726         if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2727                 goto mcc_cq_destroy;
2728
2729         /* Ask BE to create MCC queue */
2730         if (beiscsi_cmd_mccq_create(phba, q, cq))
2731                 goto mcc_q_free;
2732
2733         return 0;
2734
2735 mcc_q_free:
2736         be_queue_free(phba, q);
2737 mcc_cq_destroy:
2738         beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2739 mcc_cq_free:
2740         be_queue_free(phba, cq);
2741 err:
2742         return -1;
2743 }
2744
2745 static int find_num_cpus(void)
2746 {
2747         int  num_cpus = 0;
2748
2749         num_cpus = num_online_cpus();
2750         if (num_cpus >= MAX_CPUS)
2751                 num_cpus = MAX_CPUS - 1;
2752
2753         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2754         return num_cpus;
2755 }
2756
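     /*
      * Bring up the adapter rings in order: firmware init, EQs, MCC
      * queues, firmware version check, CQs, default PDU header and data
      * rings, SGL page posting and WRB rings.  Any failure tears the
      * port back down through hwi_cleanup().
      */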
2757 static int hwi_init_port(struct beiscsi_hba *phba)
2758 {
2759         struct hwi_controller *phwi_ctrlr;
2760         struct hwi_context_memory *phwi_context;
2761         unsigned int def_pdu_ring_sz;
2762         struct be_ctrl_info *ctrl = &phba->ctrl;
2763         int status;
2764
2765         def_pdu_ring_sz =
2766                 phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2767         phwi_ctrlr = phba->phwi_ctrlr;
2768         phwi_context = phwi_ctrlr->phwi_ctxt;
2769         phwi_context->max_eqd = 0;
2770         phwi_context->min_eqd = 0;
2771         phwi_context->cur_eqd = 64;
2772         be_cmd_fw_initialize(&phba->ctrl);
2773
2774         status = beiscsi_create_eqs(phba, phwi_context);
2775         if (status != 0) {
2776                 shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2777                 goto error;
2778         }
2779
2780         status = be_mcc_queues_create(phba, phwi_context);
2781         if (status != 0)
2782                 goto error;
2783
2784         status = mgmt_check_supported_fw(ctrl, phba);
2785         if (status != 0) {
2786                 shost_printk(KERN_ERR, phba->shost,
2787                              "Unsupported fw version \n");
2788                 goto error;
2789         }
2790
2791         status = beiscsi_create_cqs(phba, phwi_context);
2792         if (status != 0) {
2793                 shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2794                 goto error;
2795         }
2796
2797         status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2798                                         def_pdu_ring_sz);
2799         if (status != 0) {
2800                 shost_printk(KERN_ERR, phba->shost,
2801                              "Default Header not created\n");
2802                 goto error;
2803         }
2804
2805         status = beiscsi_create_def_data(phba, phwi_context,
2806                                          phwi_ctrlr, def_pdu_ring_sz);
2807         if (status != 0) {
2808                 shost_printk(KERN_ERR, phba->shost,
2809                              "Default Data not created\n");
2810                 goto error;
2811         }
2812
2813         status = beiscsi_post_pages(phba);
2814         if (status != 0) {
2815                 shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2816                 goto error;
2817         }
2818
2819         status = beiscsi_create_wrb_rings(phba, phwi_context, phwi_ctrlr);
2820         if (status != 0) {
2821                 shost_printk(KERN_ERR, phba->shost,
2822                              "WRB Rings not created\n");
2823                 goto error;
2824         }
2825
2826         SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2827         return 0;
2828
2829 error:
2830         shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2831         hwi_cleanup(phba);
2832         return -ENOMEM;
2833 }
2834
2835 static int hwi_init_controller(struct beiscsi_hba *phba)
2836 {
2837         struct hwi_controller *phwi_ctrlr;
2838
2839         phwi_ctrlr = phba->phwi_ctrlr;
2840         if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2841                 phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2842                     init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2843                 SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2844                          phwi_ctrlr->phwi_ctxt);
2845         } else {
2846                 shost_printk(KERN_ERR, phba->shost,
2847                              "HWI_MEM_ADDN_CONTEXT is more than one element. "
2848                              "Failing to load\n");
2849                 return -ENOMEM;
2850         }
2851
2852         iscsi_init_global_templates(phba);
2853         beiscsi_init_wrb_handle(phba);
2854         hwi_init_async_pdu_ctx(phba);
2855         if (hwi_init_port(phba) != 0) {
2856                 shost_printk(KERN_ERR, phba->shost,
2857                              "hwi_init_controller failed\n");
2858                 return -ENOMEM;
2859         }
2860         return 0;
2861 }
2862
2863 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2864 {
2865         struct be_mem_descriptor *mem_descr;
2866         int i, j;
2867
2868         mem_descr = phba->init_mem;
2869         i = 0;
2870         j = 0;
2871         for (i = 0; i < SE_MEM_MAX; i++) {
2872                 for (j = mem_descr->num_elements; j > 0; j--) {
2873                         pci_free_consistent(phba->pcidev,
2874                           mem_descr->mem_array[j - 1].size,
2875                           mem_descr->mem_array[j - 1].virtual_address,
2876                           mem_descr->mem_array[j - 1].bus_address.
2877                                 u.a64.address);
2878                 }
2879                 kfree(mem_descr->mem_array);
2880                 mem_descr++;
2881         }
2882         kfree(phba->init_mem);
2883         kfree(phba->phwi_ctrlr);
2884 }
2885
2886 static int beiscsi_init_controller(struct beiscsi_hba *phba)
2887 {
2888         int ret = -ENOMEM;
2889
2890         ret = beiscsi_get_memory(phba);
2891         if (ret < 0) {
2892                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
2893                              "Failed in beiscsi_get_memory\n");
2894                 return ret;
2895         }
2896
2897         ret = hwi_init_controller(phba);
2898         if (ret)
2899                 goto free_init;
2900         SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2901         return 0;
2902
2903 free_init:
2904         beiscsi_free_mem(phba);
2905         return -ENOMEM;
2906 }
2907
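     /*
      * Split the SGL handles in HWI_MEM_SGLH between the I/O pool
      * (ios_per_ctrl entries) and the eh/management pool, then bind each
      * handle to its iscsi_sge fragment area in HWI_MEM_SGE and assign
      * its firmware sgl_index.
      */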
2908 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2909 {
2910         struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2911         struct sgl_handle *psgl_handle;
2912         struct iscsi_sge *pfrag;
2913         unsigned int arr_index, i, idx;
2914
2915         phba->io_sgl_hndl_avbl = 0;
2916         phba->eh_sgl_hndl_avbl = 0;
2917
2918         mem_descr_sglh = phba->init_mem;
2919         mem_descr_sglh += HWI_MEM_SGLH;
2920         if (1 == mem_descr_sglh->num_elements) {
2921                 phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2922                                                  phba->params.ios_per_ctrl,
2923                                                  GFP_KERNEL);
2924                 if (!phba->io_sgl_hndl_base) {
2925                         shost_printk(KERN_ERR, phba->shost,
2926                                      "Mem Alloc Failed. Failing to load\n");
2927                         return -ENOMEM;
2928                 }
2929                 phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2930                                                  (phba->params.icds_per_ctrl -
2931                                                  phba->params.ios_per_ctrl),
2932                                                  GFP_KERNEL);
2933                 if (!phba->eh_sgl_hndl_base) {
2934                         kfree(phba->io_sgl_hndl_base);
2935                         shost_printk(KERN_ERR, phba->shost,
2936                                      "Mem Alloc Failed. Failing to load\n");
2937                         return -ENOMEM;
2938                 }
2939         } else {
2940                 shost_printk(KERN_ERR, phba->shost,
2941                              "HWI_MEM_SGLH is more than one element. "
2942                              "Failing to load\n");
2943                 return -ENOMEM;
2944         }
2945
2946         arr_index = 0;
2947         idx = 0;
2948         while (idx < mem_descr_sglh->num_elements) {
2949                 psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2950
2951                 for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2952                       sizeof(struct sgl_handle)); i++) {
2953                         if (arr_index < phba->params.ios_per_ctrl) {
2954                                 phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2955                                 phba->io_sgl_hndl_avbl++;
2956                                 arr_index++;
2957                         } else {
2958                                 phba->eh_sgl_hndl_base[arr_index -
2959                                         phba->params.ios_per_ctrl] =
2960                                                                 psgl_handle;
2961                                 arr_index++;
2962                                 phba->eh_sgl_hndl_avbl++;
2963                         }
2964                         psgl_handle++;
2965                 }
2966                 idx++;
2967         }
2968         SE_DEBUG(DBG_LVL_8,
2969                  "phba->io_sgl_hndl_avbl=%d "
2970                  "phba->eh_sgl_hndl_avbl=%d\n",
2971                  phba->io_sgl_hndl_avbl,
2972                  phba->eh_sgl_hndl_avbl);
2973         mem_descr_sg = phba->init_mem;
2974         mem_descr_sg += HWI_MEM_SGE;
2975         SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
2976                  mem_descr_sg->num_elements);
2977         arr_index = 0;
2978         idx = 0;
2979         while (idx < mem_descr_sg->num_elements) {
2980                 pfrag = mem_descr_sg->mem_array[idx].virtual_address;
2981
2982                 for (i = 0;
2983                      i < (mem_descr_sg->mem_array[idx].size) /
2984                      (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
2985                      i++) {
2986                         if (arr_index < phba->params.ios_per_ctrl)
2987                                 psgl_handle = phba->io_sgl_hndl_base[arr_index];
2988                         else
2989                                 psgl_handle = phba->eh_sgl_hndl_base[arr_index -
2990                                                 phba->params.ios_per_ctrl];
2991                         psgl_handle->pfrag = pfrag;
2992                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
2993                         AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
2994                         pfrag += phba->params.num_sge_per_io;
2995                         psgl_handle->sgl_index =
2996                                 phba->fw_config.iscsi_icd_start + arr_index++;
2997                 }
2998                 idx++;
2999         }
3000         phba->io_sgl_free_index = 0;
3001         phba->io_sgl_alloc_index = 0;
3002         phba->eh_sgl_free_index = 0;
3003         phba->eh_sgl_alloc_index = 0;
3004         return 0;
3005 }
3006
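     /*
      * Build the free-CID array (every other CID starting at the
      * firmware's iscsi_cid_start) and the ep_array endpoint pointer
      * table sized for the controller's CID range.
      */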
3007 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3008 {
3009         int i, new_cid;
3010
3011         phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3012                                   GFP_KERNEL);
3013         if (!phba->cid_array) {
3014                 shost_printk(KERN_ERR, phba->shost,
3015                              "Failed to allocate memory in "
3016                              "hba_setup_cid_tbls\n");
3017                 return -ENOMEM;
3018         }
3019         phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3020                                  phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3021         if (!phba->ep_array) {
3022                 shost_printk(KERN_ERR, phba->shost,
3023                              "Failed to allocate memory in "
3024                              "hba_setup_cid_tbls \n");
3025                 kfree(phba->cid_array);
3026                 return -ENOMEM;
3027         }
3028         new_cid = phba->fw_config.iscsi_cid_start;
3029         for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3030                 phba->cid_array[i] = new_cid;
3031                 new_cid += 2;
3032         }
3033         phba->avlbl_cids = phba->params.cxns_per_ctrl;
3034         return 0;
3035 }
3036
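     /*
      * Set the host-interrupt enable bit in the MEMBAR interrupt control
      * register if it is not already set, then rearm the doorbell of
      * every event queue.
      */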
3037 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3038 {
3039         struct be_ctrl_info *ctrl = &phba->ctrl;
3040         struct hwi_controller *phwi_ctrlr;
3041         struct hwi_context_memory *phwi_context;
3042         struct be_queue_info *eq;
3043         u8 __iomem *addr;
3044         u32 reg, i;
3045         u32 enabled;
3046
3047         phwi_ctrlr = phba->phwi_ctrlr;
3048         phwi_context = phwi_ctrlr->phwi_ctxt;
3049
3050         addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3051                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3052         reg = ioread32(addr);
3053         SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
3054
3055         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3056         if (!enabled) {
3057                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3058                 SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3059                 iowrite32(reg, addr);
3060                 for (i = 0; i <= phba->num_cpus; i++) {
3061                         eq = &phwi_context->be_eq[i].q;
3062                         SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3063                         hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3064                 }
3065         } else
3066                 shost_printk(KERN_WARNING, phba->shost,
3067                              "In hwi_enable_intr, Not Enabled \n");
3068         return true;
3069 }
3070
3071 static void hwi_disable_intr(struct beiscsi_hba *phba)
3072 {
3073         struct be_ctrl_info *ctrl = &phba->ctrl;
3074
3075         u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3076         u32 reg = ioread32(addr);
3077
3078         u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3079         if (enabled) {
3080                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3081                 iowrite32(reg, addr);
3082         } else
3083                 shost_printk(KERN_WARNING, phba->shost,
3084                              "In hwi_disable_intr, Already Disabled \n");
3085 }
3086
3087 static int beiscsi_init_port(struct beiscsi_hba *phba)
3088 {
3089         int ret;
3090
3091         ret = beiscsi_init_controller(phba);
3092         if (ret < 0) {
3093                 shost_printk(KERN_ERR, phba->shost,
3094                              "beiscsi_dev_probe - Failed in "
3095                              "beiscsi_init_controller\n");
3096                 return ret;
3097         }
3098         ret = beiscsi_init_sgl_handle(phba);
3099         if (ret < 0) {
3100                 shost_printk(KERN_ERR, phba->shost,
3101                              "beiscsi_dev_probe - Failed in "
3102                              "beiscsi_init_sgl_handle\n");
3103                 goto do_cleanup_ctrlr;
3104         }
3105
3106         if (hba_setup_cid_tbls(phba)) {
3107                 shost_printk(KERN_ERR, phba->shost,
3108                              "Failed in hba_setup_cid_tbls\n");
3109                 kfree(phba->io_sgl_hndl_base);
3110                 kfree(phba->eh_sgl_hndl_base);
3111                 goto do_cleanup_ctrlr;
3112         }
3113
3114         return ret;
3115
3116 do_cleanup_ctrlr:
3117         hwi_cleanup(phba);
3118         return ret;
3119 }
3120
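     /*
      * Drain each event queue: invalidate any entries still marked valid
      * and ring the EQ doorbell to acknowledge them.
      */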
3121 static void hwi_purge_eq(struct beiscsi_hba *phba)
3122 {
3123         struct hwi_controller *phwi_ctrlr;
3124         struct hwi_context_memory *phwi_context;
3125         struct be_queue_info *eq;
3126         struct be_eq_entry *eqe = NULL;
3127         int i, eq_msix;
3128         unsigned int num_processed;
3129
3130         phwi_ctrlr = phba->phwi_ctrlr;
3131         phwi_context = phwi_ctrlr->phwi_ctxt;
3132         if (phba->msix_enabled)
3133                 eq_msix = 1;
3134         else
3135                 eq_msix = 0;
3136
3137         for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3138                 eq = &phwi_context->be_eq[i].q;
3139                 eqe = queue_tail_node(eq);
3140                 num_processed = 0;
3141                 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3142                                         & EQE_VALID_MASK) {
3143                         AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3144                         queue_tail_inc(eq);
3145                         eqe = queue_tail_node(eq);
3146                         num_processed++;
3147                 }
3148
3149                 if (num_processed)
3150                         hwi_ring_eq_db(phba, eq->id, 1, num_processed, 1, 1);
3151         }
3152 }
3153
3154 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3155 {
3156         unsigned char mgmt_status;
3157
3158         mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3159         if (mgmt_status)
3160                 shost_printk(KERN_WARNING, phba->shost,
3161                              "mgmt_epfw_cleanup FAILED \n");
3162
3163         hwi_purge_eq(phba);
3164         hwi_cleanup(phba);
3165         kfree(phba->io_sgl_hndl_base);
3166         kfree(phba->eh_sgl_hndl_base);
3167         kfree(phba->cid_array);
3168         kfree(phba->ep_array);
3169 }
3170
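     /*
      * Build a target-context-update WRB carrying the negotiated login
      * parameters (burst lengths, ERL, digest and R2T/immediate-data
      * flags, exp_statsn) for this connection and ring the TXULP
      * doorbell to hand it to the adapter.
      */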
3171 void
3172 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3173                            struct beiscsi_offload_params *params)
3174 {
3175         struct wrb_handle *pwrb_handle;
3176         struct iscsi_target_context_update_wrb *pwrb = NULL;
3177         struct be_mem_descriptor *mem_descr;
3178         struct beiscsi_hba *phba = beiscsi_conn->phba;
3179         u32 doorbell = 0;
3180
3181         /*
3182          * We can always use 0 here because it is reserved by libiscsi for
3183          * login/startup related tasks.
3184          */
3185         pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3186                                        phba->fw_config.iscsi_cid_start));
3187         pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3188         memset(pwrb, 0, sizeof(*pwrb));
3189         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3190                       max_burst_length, pwrb, params->dw[offsetof
3191                       (struct amap_beiscsi_offload_params,
3192                       max_burst_length) / 32]);
3193         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3194                       max_send_data_segment_length, pwrb,
3195                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3196                       max_send_data_segment_length) / 32]);
3197         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3198                       first_burst_length,
3199                       pwrb,
3200                       params->dw[offsetof(struct amap_beiscsi_offload_params,
3201                       first_burst_length) / 32]);
3202
3203         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3204                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3205                       erl) / 32] & OFFLD_PARAMS_ERL));
3206         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3207                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3208                       dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3209         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3210                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3211                       hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3212         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3213                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3214                       ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3215         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3216                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3217                        imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3218         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3219                       pwrb,
3220                       (params->dw[offsetof(struct amap_beiscsi_offload_params,
3221                       exp_statsn) / 32] + 1));
3222         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3223                       0x7);
3224         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3225                       pwrb, pwrb_handle->wrb_index);
3226         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3227                       pwrb, pwrb_handle->nxt_wrb_index);
3228         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3229                         session_state, pwrb, 0);
3230         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3231                       pwrb, 1);
3232         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3233                       pwrb, 0);
3234         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3235                       0);
3236
3237         mem_descr = phba->init_mem;
3238         mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3239
3240         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3241                         pad_buffer_addr_hi, pwrb,
3242                       mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3243         AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3244                         pad_buffer_addr_lo, pwrb,
3245                       mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3246
3247         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3248
3249         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3250         doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3251                              << DB_DEF_PDU_WRB_INDEX_SHIFT;
3252         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3253
3254         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3255 }
3256
3257 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3258                               int *index, int *age)
3259 {
3260         *index = (int)itt;
3261         if (age)
3262                 *age = conn->session->age;
3263 }
3264
3265 /**
3266  * beiscsi_alloc_pdu - allocates pdu and related resources
3267  * @task: libiscsi task
3268  * @opcode: opcode of pdu for task
3269  *
3270  * This is called with the session lock held. It will allocate
3271  * the wrb and sgl if needed for the command. And it will prep
3272  * the pdu's itt. beiscsi_parse_pdu will later translate
3273  * the pdu itt to the libiscsi task itt.
3274  */
3275 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3276 {
3277         struct beiscsi_io_task *io_task = task->dd_data;
3278         struct iscsi_conn *conn = task->conn;
3279         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3280         struct beiscsi_hba *phba = beiscsi_conn->phba;
3281         struct hwi_wrb_context *pwrb_context;
3282         struct hwi_controller *phwi_ctrlr;
3283         itt_t itt;
3284         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3285         dma_addr_t paddr;
3286
3287         io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3288                                           GFP_KERNEL, &paddr);
3289         if (!io_task->cmd_bhs)
3290                 return -ENOMEM;
3291         io_task->bhs_pa.u.a64.address = paddr;
3292         io_task->libiscsi_itt = (itt_t)task->itt;
3293         io_task->pwrb_handle = alloc_wrb_handle(phba,
3294                                                 beiscsi_conn->beiscsi_conn_cid -
3295                                                 phba->fw_config.iscsi_cid_start
3296                                                 );
3297         io_task->conn = beiscsi_conn;
3298
3299         task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3300         task->hdr_max = sizeof(struct be_cmd_bhs);
3301
3302         if (task->sc) {
3303                 spin_lock(&phba->io_sgl_lock);
3304                 io_task->psgl_handle = alloc_io_sgl_handle(phba);
3305                 spin_unlock(&phba->io_sgl_lock);
3306                 if (!io_task->psgl_handle)
3307                         goto free_hndls;
3308         } else {
3309                 io_task->scsi_cmnd = NULL;
3310                 if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3311                         if (!beiscsi_conn->login_in_progress) {
3312                                 spin_lock(&phba->mgmt_sgl_lock);
3313                                 io_task->psgl_handle = (struct sgl_handle *)
3314                                                 alloc_mgmt_sgl_handle(phba);
3315                                 spin_unlock(&phba->mgmt_sgl_lock);
3316                                 if (!io_task->psgl_handle)
3317                                         goto free_hndls;
3318
3319                                 beiscsi_conn->login_in_progress = 1;
3320                                 beiscsi_conn->plogin_sgl_handle =
3321                                                         io_task->psgl_handle;
3322                         } else {
3323                                 io_task->psgl_handle =
3324                                                 beiscsi_conn->plogin_sgl_handle;
3325                         }
3326                 } else {
3327                         spin_lock(&phba->mgmt_sgl_lock);
3328                         io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3329                         spin_unlock(&phba->mgmt_sgl_lock);
3330                         if (!io_task->psgl_handle)
3331                                 goto free_hndls;
3332                 }
3333         }
3334         itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3335                                  wrb_index << 16) | (unsigned int)
3336                                 (io_task->psgl_handle->sgl_index));
3337         io_task->pwrb_handle->pio_handle = task;
3338
3339         io_task->cmd_bhs->iscsi_hdr.itt = itt;
3340         return 0;
3341
3342 free_hndls:
3343         phwi_ctrlr = phba->phwi_ctrlr;
3344         pwrb_context = &phwi_ctrlr->wrb_context[
3345                         beiscsi_conn->beiscsi_conn_cid -
3346                         phba->fw_config.iscsi_cid_start];
3347         free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3348         io_task->pwrb_handle = NULL;
3349         pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3350                       io_task->bhs_pa.u.a64.address);
3351         SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3352         return -ENOMEM;
3353 }
3354
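     /*
      * Release the per-task resources taken in beiscsi_alloc_pdu(): the
      * WRB handle, the BHS buffer and the I/O or management SGL handle.
      * The shared login SGL handle is not freed here.
      */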
3355 static void beiscsi_cleanup_task(struct iscsi_task *task)
3356 {
3357         struct beiscsi_io_task *io_task = task->dd_data;
3358         struct iscsi_conn *conn = task->conn;
3359         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3360         struct beiscsi_hba *phba = beiscsi_conn->phba;
3361         struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3362         struct hwi_wrb_context *pwrb_context;
3363         struct hwi_controller *phwi_ctrlr;
3364
3365         phwi_ctrlr = phba->phwi_ctrlr;
3366         pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3367                         - phba->fw_config.iscsi_cid_start];
3368         if (io_task->pwrb_handle) {
3369                 free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3370                 io_task->pwrb_handle = NULL;
3371         }
3372
3373         if (io_task->cmd_bhs) {
3374                 pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3375                               io_task->bhs_pa.u.a64.address);
3376         }
3377
3378         if (task->sc) {
3379                 if (io_task->psgl_handle) {
3380                         spin_lock(&phba->io_sgl_lock);
3381                         free_io_sgl_handle(phba, io_task->psgl_handle);
3382                         spin_unlock(&phba->io_sgl_lock);
3383                         io_task->psgl_handle = NULL;
3384                 }
3385         } else {
3386                 if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3387                         return;
3388                 if (io_task->psgl_handle) {
3389                         spin_lock(&phba->mgmt_sgl_lock);
3390                         free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3391                         spin_unlock(&phba->mgmt_sgl_lock);
3392                         io_task->psgl_handle = NULL;
3393                 }
3394         }
3395 }
3396
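     /*
      * Fill and post the WRB for a SCSI read or write: seed the embedded
      * Data-Out PDU for writes, copy the LUN, set the transfer length,
      * WRB and SGL indices, write the scatter-gather list and ring the
      * TXULP doorbell.
      */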
3397 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3398                           unsigned int num_sg, unsigned int xferlen,
3399                           unsigned int writedir)
3400 {
3401
3402         struct beiscsi_io_task *io_task = task->dd_data;
3403         struct iscsi_conn *conn = task->conn;
3404         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3405         struct beiscsi_hba *phba = beiscsi_conn->phba;
3406         struct iscsi_wrb *pwrb = NULL;
3407         unsigned int doorbell = 0;
3408
3409         pwrb = io_task->pwrb_handle->pwrb;
3410         io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3411         io_task->bhs_len = sizeof(struct be_cmd_bhs);
3412
3413         if (writedir) {
3414                 memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3415                 AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3416                               &io_task->cmd_bhs->iscsi_data_pdu,
3417                               (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3418                 AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3419                               &io_task->cmd_bhs->iscsi_data_pdu,
3420                               ISCSI_OPCODE_SCSI_DATA_OUT);
3421                 AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3422                               &io_task->cmd_bhs->iscsi_data_pdu, 1);
3423                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3424                               INI_WR_CMD);
3425                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3426         } else {
3427                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3428                               INI_RD_CMD);
3429                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3430         }
3431         memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3432                dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3433                io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3434
3435         AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3436                       cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3437                                   lun[0]));
3438         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3439         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3440                       io_task->pwrb_handle->wrb_index);
3441         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3442                       be32_to_cpu(task->cmdsn));
3443         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3444                       io_task->psgl_handle->sgl_index);
3445
3446         hwi_write_sgl(pwrb, sg, num_sg, io_task);
3447
3448         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3449                       io_task->pwrb_handle->nxt_wrb_index);
3450         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3451
3452         doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3453         doorbell |= (io_task->pwrb_handle->wrb_index &
3454                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3455         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3456
3457         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3458         return 0;
3459 }
3460
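     /*
      * Post a management PDU (login, nop-out, text, TMF, logout) on a
      * WRB.  For a TMF the referenced task's ICD is first invalidated
      * through the MCC path and the driver waits for that to complete
      * before posting the WRB.
      */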
3461 static int beiscsi_mtask(struct iscsi_task *task)
3462 {
3463         struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
3464         struct iscsi_conn *conn = task->conn;
3465         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3466         struct beiscsi_hba *phba = beiscsi_conn->phba;
3467         struct iscsi_session *session;
3468         struct iscsi_wrb *pwrb = NULL;
3469         struct hwi_controller *phwi_ctrlr;
3470         struct hwi_wrb_context *pwrb_context;
3471         struct wrb_handle *pwrb_handle;
3472         unsigned int doorbell = 0;
3473         unsigned int i, cid;
3474         struct iscsi_task *aborted_task;
3475         unsigned int tag;
3476
3477         cid = beiscsi_conn->beiscsi_conn_cid;
3478         pwrb = io_task->pwrb_handle->pwrb;
3479         AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3480                       be32_to_cpu(task->cmdsn));
3481         AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3482                       io_task->pwrb_handle->wrb_index);
3483         AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3484                       io_task->psgl_handle->sgl_index);
3485
3486         switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3487         case ISCSI_OP_LOGIN:
3488                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3489                               TGT_DM_CMD);
3490                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3491                 AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3492                 hwi_write_buffer(pwrb, task);
3493                 break;
3494         case ISCSI_OP_NOOP_OUT:
3495                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3496                               INI_RD_CMD);
3497                 if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3498                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3499                 else
3500                         AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3501                 hwi_write_buffer(pwrb, task);
3502                 break;
3503         case ISCSI_OP_TEXT:
3504                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3505                               INI_WR_CMD);
3506                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3507                 AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3508                 hwi_write_buffer(pwrb, task);
3509                 break;
3510         case ISCSI_OP_SCSI_TMFUNC:
3511                 session = conn->session;
3512                 i = ((struct iscsi_tm *)task->hdr)->rtt;
3513                 phwi_ctrlr = phba->phwi_ctrlr;
3514                 pwrb_context = &phwi_ctrlr->wrb_context[cid -
3515                                             phba->fw_config.iscsi_cid_start];
3516                 pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
3517                                                                 >> 16];
3518                 aborted_task = pwrb_handle->pio_handle;
3519                 if (!aborted_task)
3520                         return 0;
3521
3522                 aborted_io_task = aborted_task->dd_data;
3523                 if (!aborted_io_task->scsi_cmnd)
3524                         return 0;
3525
3526                 tag = mgmt_invalidate_icds(phba,
3527                                      aborted_io_task->psgl_handle->sgl_index,
3528                                      cid);
3529                 if (!tag) {
3530                         shost_printk(KERN_WARNING, phba->shost,
3531                                      "mgmt_invalidate_icds could not be"
3532                                      " submitted\n");
3533                 } else {
3534                         wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3535                                                  phba->ctrl.mcc_numtag[tag]);
3536                         free_mcc_tag(&phba->ctrl, tag);
3537                 }
3538                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3539                               INI_TMF_CMD);
3540                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3541                 hwi_write_buffer(pwrb, task);
3542                 break;
3543         case ISCSI_OP_LOGOUT:
3544                 AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3545                 AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3546                                 HWH_TYPE_LOGOUT);
3547                 hwi_write_buffer(pwrb, task);
3548                 break;
3549
3550         default:
3551                 SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
3552                          task->hdr->opcode & ISCSI_OPCODE_MASK);
3553                 return -EINVAL;
3554         }
3555
3556         AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3557                       task->data_count);
3558         AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3559                       io_task->pwrb_handle->nxt_wrb_index);
3560         be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3561
3562         doorbell |= cid & DB_WRB_POST_CID_MASK;
3563         doorbell |= (io_task->pwrb_handle->wrb_index &
3564                      DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3565         doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3566         iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3567         return 0;
3568 }
3569
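     /*
      * libiscsi xmit entry point: management PDUs go to beiscsi_mtask();
      * SCSI commands are DMA-mapped and handed to beiscsi_iotask() with
      * their transfer length and direction.
      */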
3570 static int beiscsi_task_xmit(struct iscsi_task *task)
3571 {
3572         struct iscsi_conn *conn = task->conn;
3573         struct beiscsi_io_task *io_task = task->dd_data;
3574         struct scsi_cmnd *sc = task->sc;
3575         struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3576         struct scatterlist *sg;
3577         int num_sg;
3578         unsigned int  writedir = 0, xferlen = 0;
3579
3580         SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
3581                  "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
3582                  task, conn, beiscsi_conn);
3583         if (!sc)
3584                 return beiscsi_mtask(task);
3585
3586         io_task->scsi_cmnd = sc;
3587         num_sg = scsi_dma_map(sc);
3588         if (num_sg < 0) {
3589                 SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3590                 return num_sg;
3591         }
3592         SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3593                   (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3594         xferlen = scsi_bufflen(sc);
3595         sg = scsi_sglist(sc);
3596         if (sc->sc_data_direction == DMA_TO_DEVICE) {
3597                 writedir = 1;
3598                 SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3599                          task->imm_count);
3600         } else
3601                 writedir = 0;
3602         return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3603 }
3604
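     /*
      * PCI remove: disable interrupts, free the MSI-X or INTx vectors,
      * stop blk-iopoll and the workqueue, clean the port and release the
      * mailbox memory and the iscsi host.
      */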
3605 static void beiscsi_remove(struct pci_dev *pcidev)
3606 {
3607         struct beiscsi_hba *phba = NULL;
3608         struct hwi_controller *phwi_ctrlr;
3609         struct hwi_context_memory *phwi_context;
3610         struct be_eq_obj *pbe_eq;
3611         unsigned int i, msix_vec;
3612
3613         phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3614         if (!phba) {
3615                 dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
3616                 return;
3617         }
3618
3619         phwi_ctrlr = phba->phwi_ctrlr;
3620         phwi_context = phwi_ctrlr->phwi_ctxt;
3621         hwi_disable_intr(phba);
3622         if (phba->msix_enabled) {
3623                 for (i = 0; i <= phba->num_cpus; i++) {
3624                         msix_vec = phba->msix_entries[i].vector;
3625                         free_irq(msix_vec, &phwi_context->be_eq[i]);
3626                 }
3627         } else
3628                 if (phba->pcidev->irq)
3629                         free_irq(phba->pcidev->irq, phba);
3630         pci_disable_msix(phba->pcidev);
3631         destroy_workqueue(phba->wq);
3632         if (blk_iopoll_enabled)
3633                 for (i = 0; i < phba->num_cpus; i++) {
3634                         pbe_eq = &phwi_context->be_eq[i];
3635                         blk_iopoll_disable(&pbe_eq->iopoll);
3636                 }
3637
3638         beiscsi_clean_port(phba);
3639         beiscsi_free_mem(phba);
3640         beiscsi_unmap_pci_function(phba);
3641         pci_free_consistent(phba->pcidev,
3642                             phba->ctrl.mbox_mem_alloced.size,
3643                             phba->ctrl.mbox_mem_alloced.va,
3644                             phba->ctrl.mbox_mem_alloced.dma);
3645         iscsi_host_remove(phba->shost);
3646         pci_dev_put(phba->pcidev);
3647         iscsi_host_free(phba->shost);
3648 }
3649
3650 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3651 {
3652         int i, status;
3653
3654         for (i = 0; i <= phba->num_cpus; i++)
3655                 phba->msix_entries[i].entry = i;
3656
3657         status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3658                                  (phba->num_cpus + 1));
3659         if (!status)
3660                 phba->msix_enabled = true;
3661
3662         return;
3663 }
3664
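     /*
      * PCI probe: enable the device, allocate the host, size the EQ set
      * from the online CPUs when MSI-X is requested, initialize the
      * controller and port, set up the MCC tags, workqueue, blk-iopoll
      * and IRQs, and finally enable interrupts.
      */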
3665 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3666                                 const struct pci_device_id *id)
3667 {
3668         struct beiscsi_hba *phba = NULL;
3669         struct hwi_controller *phwi_ctrlr;
3670         struct hwi_context_memory *phwi_context;
3671         struct be_eq_obj *pbe_eq;
3672         int ret, msix_vec, num_cpus, i;
3673
3674         ret = beiscsi_enable_pci(pcidev);
3675         if (ret < 0) {
3676                 dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3677                         "Failed to enable pci device\n");
3678                 return ret;
3679         }
3680
3681         phba = beiscsi_hba_alloc(pcidev);
3682         if (!phba) {
3683                 dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3684                         " Failed in beiscsi_hba_alloc \n");
3685                 goto disable_pci;
3686         }
3687         SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
3688
3689         if (enable_msix)
3690                 num_cpus = find_num_cpus();
3691         else
3692                 num_cpus = 1;
3693         phba->num_cpus = num_cpus;
3694         SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3695
3696         if (enable_msix)
3697                 beiscsi_msix_enable(phba);
3698         ret = be_ctrl_init(phba, pcidev);
3699         if (ret) {
3700                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3701                                 "Failed in be_ctrl_init\n");
3702                 goto hba_free;
3703         }
3704
3705         spin_lock_init(&phba->io_sgl_lock);
3706         spin_lock_init(&phba->mgmt_sgl_lock);
3707         spin_lock_init(&phba->isr_lock);
3708         ret = mgmt_get_fw_config(&phba->ctrl, phba);
3709         if (ret != 0) {
3710                 shost_printk(KERN_ERR, phba->shost,
3711                              "Error getting fw config\n");
3712                 goto free_port;
3713         }
3714         phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3715         beiscsi_get_params(phba);
3716         phba->shost->can_queue = phba->params.ios_per_ctrl;
3717         ret = beiscsi_init_port(phba);
3718         if (ret < 0) {
3719                 shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3720                              "Failed in beiscsi_init_port\n");
3721                 goto free_port;
3722         }
3723
        for (i = 0; i < MAX_MCC_CMD; i++) {
                init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
                phba->ctrl.mcc_tag[i] = i + 1;
                phba->ctrl.mcc_numtag[i + 1] = 0;
                phba->ctrl.mcc_tag_available++;
        }

        phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

        snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
                 phba->shost->host_no);
        phba->wq = create_workqueue(phba->wq_name);
        if (!phba->wq) {
                shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
                                "Failed to allocate work queue\n");
                ret = -ENOMEM;
                goto free_twq;
        }

        INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);

        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
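        /*
         * Hand completion processing to blk-iopoll: one iopoll instance
         * per event queue, each limited to be_iopoll_budget entries per
         * poll iteration.
         */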
        if (blk_iopoll_enabled) {
                for (i = 0; i < phba->num_cpus; i++) {
                        pbe_eq = &phwi_context->be_eq[i];
                        blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
                                        be_iopoll);
                        blk_iopoll_enable(&pbe_eq->iopoll);
                }
        }
        ret = beiscsi_init_irqs(phba);
        if (ret < 0) {
                shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
                             "Failed in beiscsi_init_irqs\n");
                goto free_blkenbld;
        }
        ret = hwi_enable_intr(phba);
        if (ret < 0) {
                shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
                             "Failed in hwi_enable_intr\n");
                goto free_ctrlr;
        }
        SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
        return 0;

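/*
 * Error unwind: the labels below fall through so that each failure
 * point releases only what was set up before it, in reverse order of
 * initialisation.
 */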
free_ctrlr:
        if (phba->msix_enabled) {
                for (i = 0; i <= phba->num_cpus; i++) {
                        msix_vec = phba->msix_entries[i].vector;
                        free_irq(msix_vec, &phwi_context->be_eq[i]);
                }
        } else if (phba->pcidev->irq)
                free_irq(phba->pcidev->irq, phba);
        pci_disable_msix(phba->pcidev);
free_blkenbld:
        destroy_workqueue(phba->wq);
        if (blk_iopoll_enabled)
                for (i = 0; i < phba->num_cpus; i++) {
                        pbe_eq = &phwi_context->be_eq[i];
                        blk_iopoll_disable(&pbe_eq->iopoll);
                }
free_twq:
        beiscsi_clean_port(phba);
        beiscsi_free_mem(phba);
free_port:
        pci_free_consistent(phba->pcidev,
                            phba->ctrl.mbox_mem_alloced.size,
                            phba->ctrl.mbox_mem_alloced.va,
                            phba->ctrl.mbox_mem_alloced.dma);
        beiscsi_unmap_pci_function(phba);
hba_free:
        iscsi_host_remove(phba->shost);
        pci_dev_put(phba->pcidev);
        iscsi_host_free(phba->shost);
disable_pci:
        pci_disable_device(pcidev);
        return ret;
}

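/*
 * iSCSI transport template: advertises the adapter's offload
 * capabilities and wires the open-iscsi/libiscsi callbacks to the
 * be2iscsi session, connection and PDU handlers.
 */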
struct iscsi_transport beiscsi_iscsi_transport = {
        .owner = THIS_MODULE,
        .name = DRV_NAME,
        .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
                CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
        .param_mask = ISCSI_MAX_RECV_DLENGTH |
                ISCSI_MAX_XMIT_DLENGTH |
                ISCSI_HDRDGST_EN |
                ISCSI_DATADGST_EN |
                ISCSI_INITIAL_R2T_EN |
                ISCSI_MAX_R2T |
                ISCSI_IMM_DATA_EN |
                ISCSI_FIRST_BURST |
                ISCSI_MAX_BURST |
                ISCSI_PDU_INORDER_EN |
                ISCSI_DATASEQ_INORDER_EN |
                ISCSI_ERL |
                ISCSI_CONN_PORT |
                ISCSI_CONN_ADDRESS |
                ISCSI_EXP_STATSN |
                ISCSI_PERSISTENT_PORT |
                ISCSI_PERSISTENT_ADDRESS |
                ISCSI_TARGET_NAME | ISCSI_TPGT |
                ISCSI_USERNAME | ISCSI_PASSWORD |
                ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
                ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
                ISCSI_LU_RESET_TMO |
                ISCSI_PING_TMO | ISCSI_RECV_TMO |
                ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
        .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
                                ISCSI_HOST_INITIATOR_NAME,
        .create_session = beiscsi_session_create,
        .destroy_session = beiscsi_session_destroy,
        .create_conn = beiscsi_conn_create,
        .bind_conn = beiscsi_conn_bind,
        .destroy_conn = iscsi_conn_teardown,
        .set_param = beiscsi_set_param,
        .get_conn_param = beiscsi_conn_get_param,
        .get_session_param = iscsi_session_get_param,
        .get_host_param = beiscsi_get_host_param,
        .start_conn = beiscsi_conn_start,
        .stop_conn = beiscsi_conn_stop,
        .send_pdu = iscsi_conn_send_pdu,
        .xmit_task = beiscsi_task_xmit,
        .cleanup_task = beiscsi_cleanup_task,
        .alloc_pdu = beiscsi_alloc_pdu,
        .parse_pdu_itt = beiscsi_parse_pdu,
        .get_stats = beiscsi_conn_get_stats,
        .ep_connect = beiscsi_ep_connect,
        .ep_poll = beiscsi_ep_poll,
        .ep_disconnect = beiscsi_ep_disconnect,
        .session_recovery_timedout = iscsi_session_recovery_timedout,
};

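/*
 * PCI driver glue: binds beiscsi_dev_probe()/beiscsi_remove() to the
 * ServerEngines device IDs in beiscsi_pci_id_table.
 */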
static struct pci_driver beiscsi_pci_driver = {
        .name = DRV_NAME,
        .probe = beiscsi_dev_probe,
        .remove = beiscsi_remove,
        .id_table = beiscsi_pci_id_table
};

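/*
 * Module load: register the iSCSI transport first, then the PCI
 * driver.  If PCI registration fails, the transport is unregistered
 * again before returning the error.
 */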
static int __init beiscsi_module_init(void)
{
        int ret;

        beiscsi_scsi_transport =
                        iscsi_register_transport(&beiscsi_iscsi_transport);
        if (!beiscsi_scsi_transport) {
                SE_DEBUG(DBG_LVL_1,
                         "beiscsi_module_init - Unable to register beiscsi "
                         "transport.\n");
                return -ENOMEM;
        }
        SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
                 &beiscsi_iscsi_transport);

        ret = pci_register_driver(&beiscsi_pci_driver);
        if (ret) {
                SE_DEBUG(DBG_LVL_1,
                         "beiscsi_module_init - Unable to register "
                         "beiscsi pci driver.\n");
                goto unregister_iscsi_transport;
        }
        return 0;

unregister_iscsi_transport:
        iscsi_unregister_transport(&beiscsi_iscsi_transport);
        return ret;
}

static void __exit beiscsi_module_exit(void)
{
        pci_unregister_driver(&beiscsi_pci_driver);
        iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);