// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

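/* Device registers (command interface, doorbells, FW_READY and clock words)
 * live in BAR0 and are big-endian; the helpers above wrap iowrite32be() /
 * ioread32be() so that callers only name the register offset, e.g.
 * mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0) or
 * mlxsw_pci_read32(mlxsw_pci, FW_READY).
 */
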
enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	u64 utc_sec_offset;
	u64 utc_nsec_offset;
	bool lag_mode_support;
	bool cff_support;
	enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
	enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
	bool skip_reset;
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

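/* Elements are handed back and forth via an owner bit. Queue counts are
 * powers of two, so (consumer_counter & count) flips exactly once per lap
 * around the ring; the hardware writes the owner bit to the current lap
 * parity when it fills an element, and an element whose owner bit does not
 * match that parity is still owned by the hardware.
 */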
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

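/* Three doorbell flavors are built on the setters above: the producer
 * doorbell hands newly built descriptors to the hardware, the consumer
 * doorbell reports how far software has drained a CQ/EQ, and the "arm"
 * doorbell re-enables event generation for a queue once its tasklet is done.
 */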
static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

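/* Each queue is backed by MLXSW_PCI_AQ_PAGES contiguous DMA pages of
 * MLXSW_PCI_PAGE_SIZE bytes each (allocated as one MLXSW_PCI_AQ_SIZE chunk
 * in mlxsw_pci_queue_init()); the sw2hw commands below hand the page
 * addresses to the device, with log2_dq_sz of 3 matching the eight pages.
 */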
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int lp;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;
	lp = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_IGNORE_WQE :
						  MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = dma_map_single(&pdev->dev, frag_data, frag_len, direction);
	if (unlikely(dma_mapping_error(&pdev->dev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info,
				   gfp_t gfp)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
	if (!skb)
		return -ENOMEM;

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs &&
	    !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

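/* The CQE version is chosen per CQ: the device-wide maximum is used, except
 * that CQs backing SDQs (CQ numbers below num_sdq_cqs) fall back to CQE v1
 * when the SDQs cannot handle v2, as decided in mlxsw_pci_cq_pre_init()
 * above; the remaining CQs serve RDQs.
 */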
static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
					 u32 off)
{
	return ioread32be(mlxsw_pci->hw_addr + off);
}

static void mlxsw_pci_skb_cb_ts_set(struct mlxsw_pci *mlxsw_pci,
				    struct sk_buff *skb,
				    enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	u8 ts_type;

	if (cqe_v != MLXSW_PCI_CQE_V2)
		return;

	ts_type = mlxsw_pci_cqe2_time_stamp_type_get(cqe);

	if (ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_UTC &&
	    ts_type != MLXSW_PCI_CQE_TIME_STAMP_TYPE_MIRROR_UTC)
		return;

	mlxsw_skb_cb(skb)->cqe_ts.sec = mlxsw_pci_cqe2_time_stamp_sec_get(cqe);
	mlxsw_skb_cb(skb)->cqe_ts.nsec =
		mlxsw_pci_cqe2_time_stamp_nsec_get(cqe);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_md_tx_port_init(struct sk_buff *skb,
					      const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	if (mlxsw_pci_cqe2_tx_lag_get(cqe)) {
		cb->rx_md_info.tx_port_is_lag = true;
		cb->rx_md_info.tx_lag_id = mlxsw_pci_cqe2_tx_lag_id_get(cqe);
		cb->rx_md_info.tx_lag_port_index =
			mlxsw_pci_cqe2_tx_lag_subport_get(cqe);
	} else {
		cb->rx_md_info.tx_port_is_lag = false;
		cb->rx_md_info.tx_sys_port =
			mlxsw_pci_cqe2_tx_system_port_get(cqe);
	}

	if (cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_MULTI_PORT &&
	    cb->rx_md_info.tx_sys_port != MLXSW_PCI_CQE2_TX_PORT_INVALID)
		cb->rx_md_info.tx_port_valid = 1;
	else
		cb->rx_md_info.tx_port_valid = 0;
}

static void mlxsw_pci_cqe_rdq_md_init(struct sk_buff *skb, const char *cqe)
{
	struct mlxsw_skb_cb *cb = mlxsw_skb_cb(skb);

	cb->rx_md_info.tx_congestion = mlxsw_pci_cqe2_mirror_cong_get(cqe);
	if (cb->rx_md_info.tx_congestion != MLXSW_PCI_CQE2_MIRROR_CONG_INVALID)
		cb->rx_md_info.tx_congestion_valid = 1;
	else
		cb->rx_md_info.tx_congestion_valid = 0;
	cb->rx_md_info.tx_congestion <<= MLXSW_PCI_CQE2_MIRROR_CONG_SHIFT;

	cb->rx_md_info.latency = mlxsw_pci_cqe2_mirror_latency_get(cqe);
	if (cb->rx_md_info.latency != MLXSW_PCI_CQE2_MIRROR_LATENCY_INVALID)
		cb->rx_md_info.latency_valid = 1;
	else
		cb->rx_md_info.latency_valid = 0;

	cb->rx_md_info.tx_tc = mlxsw_pci_cqe2_mirror_tclass_get(cqe);
	if (cb->rx_md_info.tx_tc != MLXSW_PCI_CQE2_MIRROR_TCLASS_INVALID)
		cb->rx_md_info.tx_tc_valid = 1;
	else
		cb->rx_md_info.tx_tc_valid = 0;

	mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_rx_info rx_info = {};
	char wqe[MLXSW_PCI_WQE_SIZE];
	struct sk_buff *skb;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.rdq.skb;
	memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
	if (err) {
		dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
		goto out;
	}

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	if (rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_INGRESS_ACL ||
	    rx_info.trap_id == MLXSW_TRAP_ID_DISCARD_EGRESS_ACL) {
		u32 cookie_index = 0;

		if (mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2)
			cookie_index = mlxsw_pci_cqe2_user_def_val_orig_pkt_len_get(cqe);
		mlxsw_skb_cb(skb)->rx_md_info.cookie_index = cookie_index;
	} else if (rx_info.trap_id >= MLXSW_TRAP_ID_MIRROR_SESSION0 &&
		   rx_info.trap_id <= MLXSW_TRAP_ID_MIRROR_SESSION7 &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		rx_info.mirror_reason = mlxsw_pci_cqe2_mirror_reason_get(cqe);
		mlxsw_pci_cqe_rdq_md_init(skb, cqe);
	} else if (rx_info.trap_id == MLXSW_TRAP_ID_PKT_SAMPLE &&
		   mlxsw_pci->max_cqe_ver >= MLXSW_PCI_CQE_V2) {
		mlxsw_pci_cqe_rdq_md_tx_port_init(skb, cqe);
	}

	mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

out:
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

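/* Completion processing below is budgeted: each tasklet run handles at most
 * half the ring (credits = count / 2), and every CQE is copied out (ncqe)
 * so its ring slot can be returned to the hardware via the consumer
 * doorbell before the SDQ/RDQ handler runs.
 */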
static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}

	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
	struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}

	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(struct tasklet_struct *t);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

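/* A queue type either has a fixed element geometry (elem_count/elem_size)
 * or computes it per queue (elem_count_f/elem_size_f); CQs use the function
 * variants because CQE v2 rings have a different element count and size
 * than v0/v1 rings (see mlxsw_pci_cq_elem_count()/mlxsw_pci_cq_elem_size()).
 */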
static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_setup(&q->tasklet, q_ops->tasklet);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
					   mem_item->size, &mem_item->mapaddr,
					   GFP_KERNEL);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
			  mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 MLXSW_PCI_EQS_COUNT);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_lag) {
		mlxsw_cmd_mbox_config_profile_set_max_lag_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_lag_set(mbox,
							  profile->max_lag);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->flood_mode_prefer_cff && mlxsw_pci->cff_support) {
		enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CFF;

		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox, flood_mode);
		mlxsw_pci->flood_mode = flood_mode;
	} else if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
		mlxsw_pci->flood_mode = profile->flood_mode;
	} else {
		WARN_ON(1);
		return -EINVAL;
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_ubridge) {
		mlxsw_cmd_mbox_config_profile_set_ubridge_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_ubridge_set(mbox,
							  profile->ubridge);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
									mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	if (profile->used_cqe_time_stamp_type) {
		mlxsw_cmd_mbox_config_profile_set_cqe_time_stamp_type_set(mbox,
									  1);
		mlxsw_cmd_mbox_config_profile_cqe_time_stamp_type_set(mbox,
					profile->cqe_time_stamp_type);
	}

	if (profile->lag_mode_prefer_sw && mlxsw_pci->lag_mode_support) {
		enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode =
			MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW;

		mlxsw_cmd_mbox_config_profile_set_lag_mode_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_lag_mode_set(mbox, lag_mode);
		mlxsw_pci->lag_mode = lag_mode;
	} else {
		mlxsw_pci->lag_mode = MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_FW;
	}
	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
						   mem_item->size,
						   &mem_item->mapaddr, GFP_KERNEL);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		dma_free_coherent(&mlxsw_pci->pdev->dev, mem_item->size,
				  mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = dma_alloc_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE,
				       &mbox->mapaddr, GFP_KERNEL);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	dma_free_coherent(&pdev->dev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			  mbox->mapaddr);
}

static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
				    const struct pci_device_id *id,
				    u32 *p_sys_status)
{
	unsigned long end;
	u32 val;

	/* We must wait for the HW to become responsive. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));

	*p_sys_status = val & MLXSW_PCI_FW_READY_MASK;

	return -EBUSY;
}

static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	int err;

	mlxsw_reg_mrsr_pack(mrsr_pl,
			    MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;

	device_lock_assert(&pdev->dev);

	pci_cfg_access_lock(pdev);
	pci_save_state(pdev);

	err = __pci_reset_function_locked(pdev);
	if (err)
		pci_err(pdev, "PCI function reset failed with %d\n", err);

	pci_restore_state(pdev);
	pci_cfg_access_unlock(pdev);

	return err;
}

static int mlxsw_pci_reset_sw(struct mlxsw_pci *mlxsw_pci)
{
	char mrsr_pl[MLXSW_REG_MRSR_LEN];

	mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_SOFTWARE_RESET);
	return mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
}

static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char mcam_pl[MLXSW_REG_MCAM_LEN];
	bool pci_reset_supported;
	u32 sys_status;
	int err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	/* PCI core already issued a PCI reset, do not issue another reset. */
	if (mlxsw_pci->skip_reset)
		return 0;

	mlxsw_reg_mcam_pack(mcam_pl,
			    MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
	err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
	if (err)
		return err;

	mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
			      &pci_reset_supported);

	if (pci_reset_supported) {
		pci_dbg(pdev, "Starting PCI reset flow\n");
		err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
	} else {
		pci_dbg(pdev, "Starting software reset flow\n");
		err = mlxsw_pci_reset_sw(mlxsw_pci);
	}
	if (err)
		return err;

	err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
	if (err) {
		dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
			sys_status);
		return err;
	}

	return 0;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_sec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC sec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_sec_bar;
	}

	mlxsw_pci->utc_sec_offset =
		mlxsw_cmd_mbox_query_fw_utc_sec_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_utc_nsec_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported UTC nsec BAR queried from hw\n");
		err = -EINVAL;
		goto err_utc_nsec_bar;
	}

	mlxsw_pci->utc_nsec_offset =
		mlxsw_cmd_mbox_query_fw_utc_nsec_offset_get(mbox);

	mlxsw_pci->lag_mode_support =
		mlxsw_cmd_mbox_query_fw_lag_mode_support_get(mbox);
	mlxsw_pci->cff_support =
		mlxsw_cmd_mbox_query_fw_cff_support_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	/* Some resources depend on details of config_profile, such as unified
	 * bridge model. Query the resources again to get correct values.
	 */
	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_requery_resources;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_utc_nsec_bar:
err_utc_sec_bar:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_reset:
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

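/* SDQ selection above: SDQ 0 is dedicated to EMAD (device management)
 * traffic, while the remaining control SDQs are spread over by local port,
 * which keeps management commands off the queues used for packet transmit.
 */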
static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, 0);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

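/* Command interface execution: the in/out mailboxes are DMA-coherent
 * buffers whose addresses are programmed into CIR_IN/OUT_PARAM before the
 * GO bit is set. Until the EQs are up (cmd.nopoll is false) completion is
 * detected by polling the GO bit; afterwards the async EQ delivers a
 * command completion event and mlxsw_pci_eq_cmd_event() wakes the waiter.
 */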
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_h;

	frc_offset_h = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_h);
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset_l;

	frc_offset_l = mlxsw_pci->free_running_clock_offset + 4;
	return mlxsw_pci_read32_off(mlxsw_pci, frc_offset_l);
}

static u32 mlxsw_pci_read_utc_sec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_sec_offset);
}

static u32 mlxsw_pci_read_utc_nsec(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci_read32_off(mlxsw_pci, mlxsw_pci->utc_nsec_offset);
}

static enum mlxsw_cmd_mbox_config_profile_lag_mode
mlxsw_pci_lag_mode(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci->lag_mode;
}

static enum mlxsw_cmd_mbox_config_profile_flood_mode
mlxsw_pci_flood_mode(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	return mlxsw_pci->flood_mode;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.read_utc_sec		= mlxsw_pci_read_utc_sec,
	.read_utc_nsec		= mlxsw_pci_read_utc_nsec,
	.lag_mode		= mlxsw_pci_lag_mode,
	.flood_mode		= mlxsw_pci_flood_mode,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto err_in_mbox_alloc;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	return 0;

err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
err_in_mbox_alloc:
	mutex_destroy(&mlxsw_pci->cmd.lock);
	return err;
}

static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	mutex_destroy(&mlxsw_pci->cmd.lock);
}

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = dev_driver_string(&pdev->dev);
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "dma_set_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	err = mlxsw_pci_cmd_init(mlxsw_pci);
	if (err)
		goto err_pci_cmd_init;

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_clock_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	mlxsw_pci_cmd_fini(mlxsw_pci);
err_pci_cmd_init:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	mlxsw_pci_cmd_fini(mlxsw_pci);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

static void mlxsw_pci_reset_prepare(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
}

static void mlxsw_pci_reset_done(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_pci->skip_reset = true;
	mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus,
				       mlxsw_pci, false, NULL, NULL);
	mlxsw_pci->skip_reset = false;
}

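/* reset_prepare/reset_done are invoked by the PCI core around a function
 * reset it initiates itself (e.g. FLR); skip_reset makes the bus
 * re-registration in reset_done skip the driver-triggered reset inside
 * mlxsw_pci_reset(), so the device is not reset twice.
 */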
static const struct pci_error_handlers mlxsw_pci_err_handler = {
	.reset_prepare = mlxsw_pci_reset_prepare,
	.reset_done = mlxsw_pci_reset_done,
};

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	pci_driver->shutdown = mlxsw_pci_remove;
	pci_driver->err_handler = &mlxsw_pci_err_handler;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");