be2iscsi: firmware mailbox and MCC command helpers
[linux-2.6-block.git] / drivers / scsi / be2iscsi / be_cmds.c
1 /**
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_mgmt.h"
20 #include "be_main.h"
21
22 void be_mcc_notify(struct beiscsi_hba *phba)
23 {
24         struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
25         u32 val = 0;
26
27         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
28         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
29         iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
30 }
31
/*
 * Allocate a free MCC command tag, busy-waiting (100us polls, up to
 * mcc_timeout attempts) while the pool is empty.
 *
 * Returns a non-zero tag on success, 0 if the pool stayed exhausted.
 *
 * NOTE(review): unlike free_mcc_tag(), this reads/writes mcc_tag[],
 * mcc_numtag[] and mcc_alloc_index without taking mbox_lock —
 * presumably callers serialize allocation; confirm, or two racing
 * allocators could hand out the same tag.
 */
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
{
	unsigned int tag = 0;
	unsigned int num = 0;	/* busy-wait retry counter */

mcc_tag_rdy:
	if (phba->ctrl.mcc_tag_available) {
		/* Take the tag at the alloc index and clear the slot. */
		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
		phba->ctrl.mcc_numtag[tag] = 0;	/* reset completion word */
	} else {
		udelay(100);
		num++;
		if (num < mcc_timeout)
			goto mcc_tag_rdy;
	}
	if (tag) {
		/* Consume the slot and advance the circular alloc index. */
		phba->ctrl.mcc_tag_available--;
		if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
			phba->ctrl.mcc_alloc_index = 0;
		else
			phba->ctrl.mcc_alloc_index++;
	}
	return tag;
}
57
58 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
59 {
60         spin_lock(&ctrl->mbox_lock);
61         tag = tag & 0x000000FF;
62         ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
63         if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
64                 ctrl->mcc_free_index = 0;
65         else
66                 ctrl->mcc_free_index++;
67         ctrl->mcc_tag_available++;
68         spin_unlock(&ctrl->mbox_lock);
69 }
70
71 bool is_link_state_evt(u32 trailer)
72 {
73         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
74                   ASYNC_TRAILER_EVENT_CODE_MASK) ==
75                   ASYNC_EVENT_CODE_LINK_STATE);
76 }
77
78 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
79 {
80         if (compl->flags != 0) {
81                 compl->flags = le32_to_cpu(compl->flags);
82                 WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
83                 return true;
84         } else
85                 return false;
86 }
87
/* Mark a completion entry consumed so it is not processed twice. */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}
92
93 static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
94                                 struct be_mcc_compl *compl)
95 {
96         u16 compl_status, extd_status;
97
98         be_dws_le_to_cpu(compl, 4);
99
100         compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
101                                         CQE_STATUS_COMPL_MASK;
102         if (compl_status != MCC_STATUS_SUCCESS) {
103                 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
104                                                 CQE_STATUS_EXTD_MASK;
105                 dev_err(&ctrl->pdev->dev,
106                         "error in cmd completion: status(compl/extd)=%d/%d\n",
107                         compl_status, extd_status);
108                 return -1;
109         }
110         return 0;
111 }
112
/*
 * ISR-side MCC completion handler: decode the completion, publish the
 * result in ctrl->mcc_numtag[tag], and wake the waiter sleeping on
 * ctrl->mcc_wait[tag].  Always returns 0.
 */
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
				    struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	unsigned short tag;

	be_dws_le_to_cpu(compl, 4);	/* swap the 4-dword CQE to host order */

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
					CQE_STATUS_COMPL_MASK;
	/* The ctrl.mcc_numtag[tag] is filled with
	 * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
	 * [7:0] = compl_status
	 */
	tag = (compl->tag0 & 0x000000FF);	/* tag lives in tag0's low byte */
	extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;

	ctrl->mcc_numtag[tag]  = 0x80000000;			/* valid bit */
	ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000);	/* wrb index */
	ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
	ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
	wake_up_interruptible(&ctrl->mcc_wait[tag]);
	return 0;
}
138
139 static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
140 {
141         struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
142         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
143
144         if (be_mcc_compl_is_new(compl)) {
145                 queue_tail_inc(mcc_cq);
146                 return compl;
147         }
148         return NULL;
149 }
150
/* Per-session callback: mark an iSCSI session failed on link loss. */
static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
{
	iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
}
155
156 void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
157                 struct be_async_event_link_state *evt)
158 {
159         switch (evt->port_link_status) {
160         case ASYNC_EVENT_LINK_DOWN:
161                 SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
162                                                 evt->physical_port);
163                 phba->state |= BE_ADAPTER_LINK_DOWN;
164                 iscsi_host_for_each_session(phba->shost,
165                                             be2iscsi_fail_session);
166                 break;
167         case ASYNC_EVENT_LINK_UP:
168                 phba->state = BE_ADAPTER_UP;
169                 SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
170                                                 evt->physical_port);
171                 break;
172         default:
173                 SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
174                                     "Physical Port %d \n",
175                                      evt->port_link_status,
176                                      evt->physical_port);
177         }
178 }
179
180 static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
181                        u16 num_popped)
182 {
183         u32 val = 0;
184         val |= qid & DB_CQ_RING_ID_MASK;
185         if (arm)
186                 val |= 1 << DB_CQ_REARM_SHIFT;
187         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
188         iowrite32(val, phba->db_va + DB_CQ_OFFSET);
189 }
190
191
/*
 * Drain the MCC completion queue under mcc_cq_lock: dispatch async
 * (link-state) trailers, process command completions, then re-arm the
 * CQ with the count of entries consumed.  Returns the status of the
 * last command completion processed (0 if none failed).
 */
int beiscsi_process_mcc(struct beiscsi_hba *phba)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;	/* num = entries consumed this pass */
	struct be_ctrl_info *ctrl = &phba->ctrl;

	spin_lock_bh(&phba->ctrl.mcc_cq_lock);
	while ((compl = be_mcc_compl_get(phba))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				   (struct be_async_event_link_state *) compl);
			else
				SE_DEBUG(DBG_LVL_1,
					 " Unsupported Async Event, flags"
					 " = 0x%08x \n", compl->flags);

		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				/* Command completion: one fewer MCCQ WRB outstanding. */
				status = be_mcc_compl_process(ctrl, compl);
				atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}
		be_mcc_compl_use(compl);	/* clear flags so it's not re-seen */
		num++;
	}

	if (num)
		beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);

	spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
	return status;
}
225
226 /* Wait till no more pending mcc requests are present */
227 static int be_mcc_wait_compl(struct beiscsi_hba *phba)
228 {
229         int i, status;
230         for (i = 0; i < mcc_timeout; i++) {
231                 status = beiscsi_process_mcc(phba);
232                 if (status)
233                         return status;
234
235                 if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)
236                         break;
237                 udelay(100);
238         }
239         if (i == mcc_timeout) {
240                 dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
241                 return -1;
242         }
243         return 0;
244 }
245
/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
	/* Ring the MCCQ doorbell, then poll until the queue drains. */
	be_mcc_notify(phba);
	return be_mcc_wait_compl(phba);
}
252
/*
 * Poll the MPU mailbox doorbell until firmware sets the ready bit.
 * Polls every 5us for the first ~50us of accumulated wait, then backs
 * off to 2ms delays; gives up once cnt exceeds 6000000 (~6s total).
 * Returns 0 when ready, -1 on timeout.
 */
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
#define long_delay 2000
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	int cnt = 0, wait = 5;	/* in usecs */
	u32 ready;

	do {
		ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (cnt > 6000000) {
			dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
			return -1;
		}

		if (cnt > 50) {
			/* Long back-off: mdelay takes ms, wait tracks us. */
			wait = long_delay;
			mdelay(long_delay / 1000);
		} else
			udelay(wait);
		cnt += wait;	/* cnt accumulates microseconds waited */
	} while (true);
	return 0;
}
279
/*
 * Post the bootstrap mailbox through the MPU doorbell using the
 * two-step protocol: first write the high DMA address bits with the
 * "hi" flag set and wait for ready, then write the low bits and wait
 * again, finally process the completion firmware wrote back into the
 * mailbox.  Returns 0 on success, non-zero on timeout or a failed
 * completion.
 *
 * NOTE(review): callers in this file invoke this with ctrl->mbox_lock
 * held — presumably that is the required locking; confirm.
 */
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
	int status;
	u32 val = 0;
	void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	val &= ~MPU_MAILBOX_DB_RDY_MASK;	/* no-op on val == 0; kept for clarity */
	val |= MPU_MAILBOX_DB_HI_MASK;		/* step 1: high address half */
	/* bits 2-31 carry mailbox DMA address bits 34-63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");
		return status;
	}
	val = 0;
	val &= ~MPU_MAILBOX_DB_RDY_MASK;	/* step 2: hi flag clear = low half */
	val &= ~MPU_MAILBOX_DB_HI_MASK;
	/* bits 2-31 carry mailbox DMA address bits 4-33 */
	val |= (u32) (mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0) {
		SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");
		return status;
	}
	/* Firmware places the command completion in the mailbox itself. */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status) {
			SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");
			return status;
		}
	} else {
		dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
323
/*
 * Insert the mailbox address into the doorbell in two steps (high
 * address bits first, then low) and poll the mbox doorbell until a
 * command completion — or a timeout — occurs.  Same protocol as
 * be_mbox_notify(), but takes the hba and logs less on failure.
 * Returns 0 on success, non-zero on timeout or failed completion.
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
{
	int status;
	u32 val = 0;
	void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	val |= MPU_MAILBOX_DB_HI_MASK;	/* step 1: high address half */
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	val = 0;	/* step 2: hi flag clear selects the low half */
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(ctrl);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(ctrl, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
369
370 void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
371                                 bool embedded, u8 sge_cnt)
372 {
373         if (embedded)
374                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
375         else
376                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
377                                                 MCC_WRB_SGE_CNT_SHIFT;
378         wrb->payload_length = payload_len;
379         be_dws_cpu_to_le(wrb, 8);
380 }
381
382 void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
383                         u8 subsystem, u8 opcode, int cmd_len)
384 {
385         req_hdr->opcode = opcode;
386         req_hdr->subsystem = subsystem;
387         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
388 }
389
390 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
391                                                         struct be_dma_mem *mem)
392 {
393         int i, buf_pages;
394         u64 dma = (u64) mem->dma;
395
396         buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
397         for (i = 0; i < buf_pages; i++) {
398                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
399                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
400                 dma += PAGE_SIZE_4K;
401         }
402 }
403
404 static u32 eq_delay_to_mult(u32 usec_delay)
405 {
406 #define MAX_INTR_RATE 651042
407         const u32 round = 10;
408         u32 multiplier;
409
410         if (usec_delay == 0)
411                 multiplier = 0;
412         else {
413                 u32 interrupt_rate = 1000000 / usec_delay;
414                 if (interrupt_rate == 0)
415                         multiplier = 1023;
416                 else {
417                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
418                         multiplier /= interrupt_rate;
419                         multiplier = (multiplier + round / 2) / round;
420                         multiplier = min(multiplier, (u32) 1023);
421                 }
422         }
423         return multiplier;
424 }
425
/* Return the WRB embedded in the bootstrap mailbox DMA buffer. */
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
{
	return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
}
430
/*
 * Take the next free WRB from the MCC queue head, zero it, and encode
 * the queue-head index into bits 23:16 of tag0 (the same field the ISR
 * copies back in be_mcc_compl_process_isr).  BUGs if the queue is full.
 */
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
{
	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
	struct be_mcc_wrb *wrb;

	BUG_ON(atomic_read(&mccq->used) >= mccq->len);
	wrb = queue_head_node(mccq);
	memset(wrb, 0, sizeof(*wrb));
	wrb->tag0 = (mccq->head & 0x000000FF) << 16;	/* wrb index in [23:16] */
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	return wrb;
}
444
445
/*
 * Create an event queue via the bootstrap mailbox
 * (OPCODE_COMMON_EQ_CREATE): program the EQ context (PCI function,
 * entry-count encoding, interrupt-delay multiplier) and the backing
 * pages.  On success records the FW-assigned eq_id and marks the EQ
 * created.  Returns the mailbox status.
 */
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
			  struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_cmd_req_eq_create *req = embedded_payload(wrb);
	/* resp aliases req: the FW overwrites the request in place. */
	struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, func, req->context,
						PCI_FUNC(ctrl->pdev->devfn));
	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	/* count field is encoded as log2(entries / 256) */
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
					__ilog2_u32(eq->len / 256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
					eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);
	return status;
}
486
/*
 * Send the firmware-initialization mailbox command: a WRB filled with
 * the fixed byte pattern FF 12 34 FF FF 56 78 FF (presumably an
 * endianness signature the FW checks — the local is even named
 * endian_check; confirm against the FW interface spec).
 * Returns the mailbox status.
 */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	int status;
	u8 *endian_check;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	/* Write the 8-byte magic pattern over the start of the WRB. */
	endian_check = (u8 *) wrb;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x12;
	*endian_check++ = 0x34;
	*endian_check++ = 0xFF;
	*endian_check++ = 0xFF;
	*endian_check++ = 0x56;
	*endian_check++ = 0x78;
	*endian_check++ = 0xFF;
	be_dws_cpu_to_le(wrb, sizeof(*wrb));

	status = be_mbox_notify(ctrl);
	if (status)
		SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");

	spin_unlock(&ctrl->mbox_lock);
	return status;
}
515
516 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
517                           struct be_queue_info *cq, struct be_queue_info *eq,
518                           bool sol_evts, bool no_delay, int coalesce_wm)
519 {
520         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
521         struct be_cmd_req_cq_create *req = embedded_payload(wrb);
522         struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
523         struct be_dma_mem *q_mem = &cq->dma_mem;
524         void *ctxt = &req->context;
525         int status;
526
527         SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n");
528         spin_lock(&ctrl->mbox_lock);
529         memset(wrb, 0, sizeof(*wrb));
530
531         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
532
533         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
534                         OPCODE_COMMON_CQ_CREATE, sizeof(*req));
535         if (!q_mem->va)
536                 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
537
538         req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
539
540         AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
541         AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
542         AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
543                       __ilog2_u32(cq->len / 256));
544         AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
545         AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
546         AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
547         AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
548         AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
549         AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
550                       PCI_FUNC(ctrl->pdev->devfn));
551         be_dws_cpu_to_le(ctxt, sizeof(req->context));
552
553         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
554
555         status = be_mbox_notify(ctrl);
556         if (!status) {
557                 cq->id = le16_to_cpu(resp->cq_id);
558                 cq->created = true;
559         } else
560                 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x \n",
561                         status);
562         spin_unlock(&ctrl->mbox_lock);
563
564         return status;
565 }
566
567 static u32 be_encoded_q_len(int q_len)
568 {
569         u32 len_encoded = fls(q_len);   /* log2(len) + 1 */
570         if (len_encoded == 16)
571                 len_encoded = 0;
572         return len_encoded;
573 }
574
575 int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
576                         struct be_queue_info *mccq,
577                         struct be_queue_info *cq)
578 {
579         struct be_mcc_wrb *wrb;
580         struct be_cmd_req_mcc_create *req;
581         struct be_dma_mem *q_mem = &mccq->dma_mem;
582         struct be_ctrl_info *ctrl;
583         void *ctxt;
584         int status;
585
586         spin_lock(&phba->ctrl.mbox_lock);
587         ctrl = &phba->ctrl;
588         wrb = wrb_from_mbox(&ctrl->mbox_mem);
589         req = embedded_payload(wrb);
590         ctxt = &req->context;
591
592         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
593
594         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
595                         OPCODE_COMMON_MCC_CREATE, sizeof(*req));
596
597         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
598
599         AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
600                       PCI_FUNC(phba->pcidev->devfn));
601         AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
602         AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
603                 be_encoded_q_len(mccq->len));
604         AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
605
606         be_dws_cpu_to_le(ctxt, sizeof(req->context));
607
608         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
609
610         status = be_mbox_notify_wait(phba);
611         if (!status) {
612                 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
613                 mccq->id = le16_to_cpu(resp->id);
614                 mccq->created = true;
615         }
616         spin_unlock(&phba->ctrl.mbox_lock);
617
618         return status;
619 }
620
621 int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
622                           int queue_type)
623 {
624         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
625         struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
626         u8 subsys = 0, opcode = 0;
627         int status;
628
629         SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n");
630         spin_lock(&ctrl->mbox_lock);
631         memset(wrb, 0, sizeof(*wrb));
632         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
633
634         switch (queue_type) {
635         case QTYPE_EQ:
636                 subsys = CMD_SUBSYSTEM_COMMON;
637                 opcode = OPCODE_COMMON_EQ_DESTROY;
638                 break;
639         case QTYPE_CQ:
640                 subsys = CMD_SUBSYSTEM_COMMON;
641                 opcode = OPCODE_COMMON_CQ_DESTROY;
642                 break;
643         case QTYPE_MCCQ:
644                 subsys = CMD_SUBSYSTEM_COMMON;
645                 opcode = OPCODE_COMMON_MCC_DESTROY;
646                 break;
647         case QTYPE_WRBQ:
648                 subsys = CMD_SUBSYSTEM_ISCSI;
649                 opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
650                 break;
651         case QTYPE_DPDUQ:
652                 subsys = CMD_SUBSYSTEM_ISCSI;
653                 opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
654                 break;
655         case QTYPE_SGL:
656                 subsys = CMD_SUBSYSTEM_ISCSI;
657                 opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
658                 break;
659         default:
660                 spin_unlock(&ctrl->mbox_lock);
661                 BUG();
662                 return -1;
663         }
664         be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
665         if (queue_type != QTYPE_SGL)
666                 req->id = cpu_to_le16(q->id);
667
668         status = be_mbox_notify(ctrl);
669
670         spin_unlock(&ctrl->mbox_lock);
671         return status;
672 }
673
/*
 * Create the default PDU queue (OPCODE_COMMON_ISCSI_DEFQ_CREATE) that
 * receives unsolicited iSCSI PDUs.  Programs the default-PDU context
 * (PDID, PCI function, ring size, buffer size, receiving CQ) and the
 * backing pages; on success records the FW-assigned id on dq.
 * Returns the mailbox status.
 */
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
				    struct be_queue_info *cq,
				    struct be_queue_info *dq, int length,
				    int entry_size)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_defq_create_req *req = embedded_payload(wrb);
	struct be_dma_mem *q_mem = &dq->dma_mem;
	void *ctxt = &req->context;
	int status;

	SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
	spin_lock(&ctrl->mbox_lock);
	memset(wrb, 0, sizeof(*wrb));

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
		      1);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
		      PCI_FUNC(ctrl->pdev->devfn));
	/* ring_size is encoded from the entry count (length / entry bytes) */
	AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
		      be_encoded_q_len(length / sizeof(struct phys_addr)));
	AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
		      ctxt, entry_size);
	AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,
		      cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify(ctrl);
	if (!status) {
		struct be_defq_create_resp *resp = embedded_payload(wrb);

		dq->id = le16_to_cpu(resp->id);
		dq->created = true;
	}
	spin_unlock(&ctrl->mbox_lock);

	return status;
}
722
723 int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
724                        struct be_queue_info *wrbq)
725 {
726         struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
727         struct be_wrbq_create_req *req = embedded_payload(wrb);
728         struct be_wrbq_create_resp *resp = embedded_payload(wrb);
729         int status;
730
731         spin_lock(&ctrl->mbox_lock);
732         memset(wrb, 0, sizeof(*wrb));
733
734         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
735
736         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
737                 OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
738         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
739         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
740
741         status = be_mbox_notify(ctrl);
742         if (!status) {
743                 wrbq->id = le16_to_cpu(resp->cid);
744                 wrbq->created = true;
745         }
746         spin_unlock(&ctrl->mbox_lock);
747         return status;
748 }
749
/*
 * Post the iSCSI SGL backing pages to the FW, chunked so each request
 * carries at most the page entries its pages[] array holds.
 *
 * A num_pages of 0xff is special-cased: one page is mapped, but the
 * final request's num_pages field is sent as 0xff (presumably a
 * sentinel the FW interprets — confirm against the FW interface).
 * On any FW failure the partially posted SGL is torn down via
 * beiscsi_cmd_q_destroy(QTYPE_SGL).
 */
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
				struct be_dma_mem *q_mem,
				u32 page_offset, u32 num_pages)
{
	struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
	struct be_post_sgl_pages_req *req = embedded_payload(wrb);
	int status;
	unsigned int curr_pages;	/* per-request page capacity */
	u32 internal_page_offset = 0;
	u32 temp_num_pages = num_pages;	/* remember the 0xff sentinel */

	if (num_pages == 0xff)
		num_pages = 1;

	spin_lock(&ctrl->mbox_lock);
	do {
		memset(wrb, 0, sizeof(*wrb));
		be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
		be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
				   OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
				   sizeof(*req));
		curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
						pages);
		req->num_pages = min(num_pages, curr_pages);
		req->page_offset = page_offset;
		be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
		/* Advance the DMA window past the pages just described. */
		q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
		internal_page_offset += req->num_pages;
		page_offset += req->num_pages;
		num_pages -= req->num_pages;

		/* Restore the 0xff sentinel in the request actually sent. */
		if (temp_num_pages == 0xff)
			req->num_pages = temp_num_pages;

		status = be_mbox_notify(ctrl);
		if (status) {
			SE_DEBUG(DBG_LVL_1,
				 "FW CMD to map iscsi frags failed.\n");
			goto error;
		}
	} while (num_pages > 0);
error:
	spin_unlock(&ctrl->mbox_lock);
	if (status != 0)
		beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
	return status;
}