/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
#define SPQ_BLOCK_SLEEP_MS              (5)
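
/* Completion-polling budgets: a quick busy-wait phase of
 * SPQ_BLOCK_DELAY_MAX_ITER iterations of SPQ_BLOCK_DELAY_US each (~100us
 * total), then a sleeping phase of SPQ_BLOCK_SLEEP_MAX_ITER iterations of
 * SPQ_BLOCK_SLEEP_MS each (~5s total) for ramrods the FW is slow to
 * complete.
 */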

/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data, u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->fw_return_code = fw_return_code;

        /* Make sure completion done is visible on waiting thread */
        smp_store_release(&comp_done->done, 0x1);
}

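/* Poll for the completion flag set by qed_spq_blocking_cb(); the callback
 * publishes fw_return_code before setting 'done' via smp_store_release(),
 * so a caller that observes 'done' may safely read the return code.
 */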
static int __qed_spq_block(struct qed_hwfn *p_hwfn,
                           struct qed_spq_entry *p_ent,
                           u8 *p_fw_ret, bool sleep_between_iter)
{
        struct qed_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
                                      : SPQ_BLOCK_DELAY_MAX_ITER;

        while (iter_cnt--) {
                /* Check whether the completion has been signalled */
                if (READ_ONCE(comp_done->done) == 1) {
                        /* Read updated FW return value */
                        smp_read_barrier_depends();
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }

                if (sleep_between_iter)
                        msleep(SPQ_BLOCK_SLEEP_MS);
                else
                        udelay(SPQ_BLOCK_DELAY_US);
        }

        return -EBUSY;
}

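/* Wait for a posted ramrod to complete, escalating gradually: an optional
 * quick busy-wait poll, then polling with sleeps, and as a last resort an
 * MCP drain request followed by one final polling round.
 */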
static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret, bool skip_quick_poll)
{
        struct qed_spq_comp_done *comp_done;
        int rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly to avoid the following sleeps.
         */
        if (!skip_quick_poll) {
                rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (!rc)
                        return 0;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                return 0;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc) {
                DP_NOTICE(p_hwfn, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __qed_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (!rc)
                return 0;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return 0;
        }
err:
        DP_NOTICE(p_hwfn,
                  "Ramrod is stuck [CID %08x cmd %02x protocol %02x echo %04x]\n",
                  le32_to_cpu(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id,
                  p_ent->elem.hdr.protocol_id,
                  le16_to_cpu(p_ent->elem.hdr.echo));

        return -EBUSY;
}

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
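/* Attach completion handling to an SPQ entry: EBLOCK/BLOCK modes use the
 * internal blocking callback so the poster can wait for the result, while
 * CB mode keeps the caller-provided callback.
 */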
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                              struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        u16                             pq;
        struct qed_cxt_info             cxt_info;
        struct core_conn_context        *p_cxt;
        union qed_qm_pq_params          pq_params;
        int                             rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

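/* Copy a single SPQ element into the chain and ring the XCM doorbell with
 * the new producer index. The 'echo' field carries the producer value so
 * the eventual completion can be matched back to this entry.
 */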
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element        *elem;
        struct core_db_data             db;

        p_ent->elem.hdr.echo    = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure doorbell is rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_ROCE:
                qed_async_roce_event(p_hwfn, p_eqe);
                return 0;
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}

/***************************************************************************
* EQ API
***************************************************************************/
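/* Advance the FW's EQE consumer index by writing it into USTORM
 * internal RAM.
 */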
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

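/* EQ handler, registered on the slowpath status block: consume EQ entries
 * up to the FW consumer snapshot, dispatching async events per protocol
 * and completing SPQ ramrods via their echo value.
 */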
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq)
                return NULL;

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain))
                goto eq_allocate_fail;

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn, qed_eq_completion,
                            p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
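/* Reset the SPQ to a pristine state: rebuild the free pool from the
 * pre-allocated entries, clear the statistics and completion bitmap,
 * acquire the SPQ CID and (re)initialize the HW context.
 */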
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool */
        p_phys  = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt  = p_spq->p_virt;

        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count             = 0;
        p_spq->comp_count               = 0;
        p_spq->comp_sent_count          = 0;
        p_spq->unlimited_pending_count  = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq)
                return -ENOMEM;

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain))
                goto spq_allocate_fail;

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity * sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}

int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry, list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                             struct qed_spq_entry *p_ent,
                             enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry, list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated
                         * p_ent.
                         */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
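/* Post as many entries from 'head' as the ring can take, while keeping
 * 'keep_reserve' ring elements free for high-priority ramrods.
 */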
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head, u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry, list);
                if (!p_ent)
                        return -EINVAL;

                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

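/* Post a ramrod: queue the entry, push whatever the ring can take, and
 * for EBLOCK mode also block here until the FW completion arrives so the
 * caller gets fw_return_code synchronously. A typical caller acquires an
 * entry via qed_spq_get_entry(), fills elem.hdr and the ramrod data, and
 * then invokes this function.
 */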
int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in QED BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code,
                                   p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

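/* Match an EQ completion back to its SPQ entry via the 'echo' producer
 * index. Out-of-order completions are recorded in a bitmap so the chain
 * consumer only advances over a contiguous run of completed entries.
 */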
int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq          *p_spq;
        struct qed_spq_entry    *p_ent = NULL;
        struct qed_spq_entry    *tmp;
        struct qed_spq_entry    *found = NULL;
        int                     rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE [echo %04x] completes\n",
                          le16_to_cpu(echo));
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   le16_to_cpu(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn,
                           QED_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

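/* ConsQ - the consolidation queue. The driver only allocates and resets
 * it; its base address is programmed into the SPQ context by
 * qed_spq_hw_initialize(), and its contents are managed by the FW.
 */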
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq)
                return NULL;

        /* Allocate and initialize ConsQ chain; 0x80 is the element size */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain))
                goto consq_allocate_fail;

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}