// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"
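
/* BAR0 of the device exposes its registers big-endian; the two accessors
 * below wrap ioread32be()/iowrite32be() so callers can name a register by
 * its MLXSW_PCI_* offset symbol.
 */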
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};
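
/* Only CQs and EQs have an "arm" doorbell, used to re-enable event
 * reporting after the queue has been serviced; the leading entries keep
 * the array indexable by the same queue-type constants.
 */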
static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}
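
/* Element ownership alternates between hardware and software on every
 * pass through the ring: the free-running consumer counter flips its
 * q->count bit each time the ring wraps, and an element still belongs to
 * the hardware while its owner bit differs from that wrap phase.
 */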
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}
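
/* Doorbell protocol: the producer doorbell passes freshly built
 * descriptors to the device, the consumer doorbell reports how far
 * software has consumed completions/events, and the arm doorbell asks
 * the device to raise an interrupt for the next event on the queue.
 * The wmb() in each helper ensures descriptor memory is visible to the
 * device before the bell rings.
 */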
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}
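
/* An RDQ is usable only once every element carries a DMA-mapped skb.
 * The init path below therefore allocates one buffer per WQE and rings
 * the producer doorbell for each element, handing the whole ring to the
 * device up front.
 */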
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs)
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}
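
/* Fetch the next CQE owned by software, or NULL if the hardware still
 * owns the element at the consumer index. The rmb() pairs with the
 * device's write of the CQE: the owner bit must be read before the rest
 * of the element can be trusted.
 */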
static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}
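
/* The CQ tasklet processes at most half the ring (q->count >> 1) per
 * invocation, which bounds the time spent in one softirq. Each CQE is
 * copied to a local buffer and the consumer doorbell rung before the
 * completion is handled, so the device may reuse the ring element in
 * the meantime.
 */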
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};
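
/* Element geometry is either fixed per queue type (elem_count/elem_size)
 * or computed at runtime via elem_count_f/elem_size_f. CQs need the
 * runtime variant because their element size and count depend on the CQE
 * version negotiated with the firmware.
 */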

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE,
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}
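
/* Bring-up order matters: EQs come first so CQs have somewhere to report
 * completions, CQs precede the descriptor queues that attach to them, and
 * cmd.nopoll is flipped only at the end, since command-interface events
 * can be used only once the async EQ is live.
 */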
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}
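
/* The device borrows host memory for its firmware area. The pages are
 * handed over with the MAP_FA command, at most
 * MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries per mailbox.
 */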
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
					 &mbox->mapaddr);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			    mbox->mapaddr);
}

static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto mbox_put;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}
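
/* SDQ 0 is dedicated to EMAD (device management) traffic; regular
 * control packets are spread over the remaining SDQs by local port
 * number.
 */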
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}
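
/* Execute a command through the command interface registers. Until the
 * async EQ is operational (cmd.nopoll is false) completion is detected
 * by polling the GO bit in CIR_CTRL; afterwards the firmware signals
 * completion with a command-interface EQE that wakes cmd.wait.
 */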
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	if (*p_wait_done) {
		err = 0;
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}
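
/* The free-running clock is read as two 32-bit halves at an offset
 * discovered via QUERY_FW; the core presumably uses it as a timestamp
 * source (e.g. for PTP).
 */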
static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_frc_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");