// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2017-2019 NXP */

#include "enetc.h"
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/vmalloc.h>

/* ENETC overhead: optional extension BD + 1 BD gap */
#define ENETC_TXBDS_NEEDED(val)	((val) + 2)
/* max # of chained Tx BDs is 15, including head and extension BD */
#define ENETC_MAX_SKB_FRAGS	13
#define ENETC_TXBDS_MAX_NEEDED	ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)

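/* Worked example of the BD budget above (illustrative): a maximally
 * fragmented skb occupies ENETC_MAX_SKB_FRAGS + 1 = 14 data BDs (head plus
 * fragments); ENETC_TXBDS_NEEDED() adds the optional extension BD and the
 * one-BD gap the ring must keep, so ENETC_TXBDS_MAX_NEEDED works out to 16.
 */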
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int active_offloads);

netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_bdr *tx_ring;
	int count;

	tx_ring = priv->tx_ring[skb->queue_mapping];

	if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
		if (unlikely(skb_linearize(skb)))
			goto drop_packet_err;

	count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
		netif_stop_subqueue(ndev, tx_ring->index);
		return NETDEV_TX_BUSY;
	}

	enetc_lock_mdio();
	count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
	enetc_unlock_mdio();

	if (unlikely(!count))
		goto drop_packet_err;

	if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
		netif_stop_subqueue(ndev, tx_ring->index);

	return NETDEV_TX_OK;

drop_packet_err:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

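/* Each hardware BD has a matching software BD (enetc_tx_swbd) recording the
 * DMA mapping type and length, so the teardown helpers below can pick the
 * matching dma_unmap_*() call.
 */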
static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
				struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->is_dma_page)
		dma_unmap_page(tx_ring->dev, tx_swbd->dma,
			       tx_swbd->len, DMA_TO_DEVICE);
	else
		dma_unmap_single(tx_ring->dev, tx_swbd->dma,
				 tx_swbd->len, DMA_TO_DEVICE);
	tx_swbd->dma = 0;
}

static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
			      struct enetc_tx_swbd *tx_swbd)
{
	if (tx_swbd->dma)
		enetc_unmap_tx_buff(tx_ring, tx_swbd);

	if (tx_swbd->skb) {
		dev_kfree_skb_any(tx_swbd->skb);
		tx_swbd->skb = NULL;
	}
}

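/* Build the BD chain for one skb: a head BD (frm_len + offload flags), an
 * optional extension BD when VLAN insertion and/or a Tx timestamp is
 * requested, then one BD per page fragment.  The producer index is only
 * pushed to hardware (the tpir write) once the whole chain is in place.
 */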
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
			      int active_offloads)
{
	struct enetc_tx_swbd *tx_swbd;
	skb_frag_t *frag;
	int len = skb_headlen(skb);
	union enetc_tx_bd temp_bd;
	union enetc_tx_bd *txbd;
	bool do_vlan, do_tstamp;
	int i, count = 0;
	unsigned int f;
	dma_addr_t dma;
	u8 flags = 0;

	i = tx_ring->next_to_use;
	txbd = ENETC_TXBD(*tx_ring, i);
	prefetchw(txbd);

	dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
		goto dma_err;

	temp_bd.addr = cpu_to_le64(dma);
	temp_bd.buf_len = cpu_to_le16(len);
	temp_bd.lstatus = 0;

	tx_swbd = &tx_ring->tx_swbd[i];
	tx_swbd->dma = dma;
	tx_swbd->len = len;
	tx_swbd->is_dma_page = 0;
	count++;

	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
		    (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
	tx_swbd->do_tstamp = do_tstamp;
	tx_swbd->check_wb = tx_swbd->do_tstamp;

	if (do_vlan || do_tstamp)
		flags |= ENETC_TXBD_FLAGS_EX;

	if (tx_ring->tsd_enable)
		flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;

	/* first BD needs frm_len and offload flags set */
	temp_bd.frm_len = cpu_to_le16(skb->len);
	temp_bd.flags = flags;

	if (flags & ENETC_TXBD_FLAGS_TSE)
		temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
							  flags);

	if (flags & ENETC_TXBD_FLAGS_EX) {
		u8 e_flags = 0;
		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		/* add extension BD for VLAN and/or timestamping */
		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		if (do_vlan) {
			temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
			temp_bd.ext.tpid = 0; /* < C-TAG */
			e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
		}

		if (do_tstamp) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
		}

		temp_bd.ext.e_flags = e_flags;
		count++;
	}

	frag = &skb_shinfo(skb)->frags[0];
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
		len = skb_frag_size(frag);
		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_err;

		*txbd = temp_bd;
		enetc_clear_tx_bd(&temp_bd);

		flags = 0;
		tx_swbd++;
		txbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
			txbd = ENETC_TXBD(*tx_ring, 0);
		}
		prefetchw(txbd);

		temp_bd.addr = cpu_to_le64(dma);
		temp_bd.buf_len = cpu_to_le16(len);

		tx_swbd->dma = dma;
		tx_swbd->len = len;
		tx_swbd->is_dma_page = 1;
		count++;
	}

	/* last BD needs 'F' bit set */
	flags |= ENETC_TXBD_FLAGS_F;
	temp_bd.flags = flags;
	*txbd = temp_bd;

	tx_ring->tx_swbd[i].skb = skb;

	enetc_bdr_idx_inc(tx_ring, &i);
	tx_ring->next_to_use = i;

	skb_tx_timestamp(skb);

	/* let H/W know BD ring has been updated */
	enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */

	return count;

dma_err:
	dev_err(tx_ring->dev, "DMA map error");

	do {
		tx_swbd = &tx_ring->tx_swbd[i];
		enetc_free_tx_skb(tx_ring, tx_swbd);
		if (i == 0)
			i = tx_ring->bd_count;
		i--;
	} while (count--);

	return 0;
}

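/* MSI-X handler: mask this vector's Rx/Tx BD ring interrupt sources and
 * defer all real work to NAPI; enetc_poll() re-enables the sources once it
 * completes.
 */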
static irqreturn_t enetc_msix(int irq, void *data)
{
	struct enetc_int_vector *v = data;
	int i;

	enetc_lock_mdio();

	/* disable interrupts */
	enetc_wr_reg_hot(v->rbier, 0);
	enetc_wr_reg_hot(v->ricr1, v->rx_ictt);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);

	enetc_unlock_mdio();

	napi_schedule(&v->napi);

	return IRQ_HANDLED;
}

static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit);

static void enetc_rx_dim_work(struct work_struct *w)
{
	struct dim *dim = container_of(w, struct dim, work);
	struct dim_cq_moder moder =
		net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
	struct enetc_int_vector	*v =
		container_of(dim, struct enetc_int_vector, rx_dim);

	v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
	dim->state = DIM_START_MEASURE;
}

static void enetc_rx_net_dim(struct enetc_int_vector *v)
{
	struct dim_sample dim_sample;

	v->comp_cnt++;

	if (!v->rx_napi_work)
		return;

	dim_update_sample(v->comp_cnt,
			  v->rx_ring.stats.packets,
			  v->rx_ring.stats.bytes,
			  &dim_sample);
	net_dim(&v->rx_dim, dim_sample);
}

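/* NAPI poll: clean this vector's Tx rings first, then the Rx ring against
 * the budget.  Interrupts stay masked (and the poll is re-run, by returning
 * the full budget) until both directions are fully drained.
 */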
static int enetc_poll(struct napi_struct *napi, int budget)
{
	struct enetc_int_vector
		*v = container_of(napi, struct enetc_int_vector, napi);
	bool complete = true;
	int work_done;
	int i;

	enetc_lock_mdio();

	for (i = 0; i < v->count_tx_rings; i++)
		if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
			complete = false;

	work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
	if (work_done == budget)
		complete = false;
	if (work_done)
		v->rx_napi_work = true;

	if (!complete) {
		enetc_unlock_mdio();
		return budget;
	}

	napi_complete_done(napi, work_done);

	if (likely(v->rx_dim_en))
		enetc_rx_net_dim(v);

	v->rx_napi_work = false;

	/* enable interrupts */
	enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);

	for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
		enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
				 ENETC_TBIER_TXTIE);

	enetc_unlock_mdio();

	return work_done;
}

static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
{
	int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;

	return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
}

static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
				u64 *tstamp)
{
	u32 lo, hi, tstamp_lo;

	lo = enetc_rd_hot(hw, ENETC_SICTR0);
	hi = enetc_rd_hot(hw, ENETC_SICTR1);
	tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
	if (lo <= tstamp_lo)
		hi -= 1;
	*tstamp = (u64)hi << 32 | tstamp_lo;
}

static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
{
	struct skb_shared_hwtstamps shhwtstamps;

	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
		shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
		/* Ensure skb_mstamp_ns, which might have been populated with
		 * the txtime, is not mistaken for a software timestamp,
		 * because this will prevent the dispatch of our hardware
		 * timestamp to the socket.
		 */
		skb->tstamp = ktime_set(0, 0);
		skb_tstamp_tx(skb, &shhwtstamps);
	}
}

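/* Reclaim completed Tx BDs: hardware's consumer index (tcir) tells us how
 * many BDs are done; a BD flagged for writeback ('W') carries the Tx
 * timestamp that enetc_tstamp_tx() dispatches to the socket.
 */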
static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
{
	struct net_device *ndev = tx_ring->ndev;
	int tx_frm_cnt = 0, tx_byte_cnt = 0;
	struct enetc_tx_swbd *tx_swbd;
	int i, bds_to_clean;
	bool do_tstamp;
	u64 tstamp = 0;

	i = tx_ring->next_to_clean;
	tx_swbd = &tx_ring->tx_swbd[i];

	bds_to_clean = enetc_bd_ready_count(tx_ring, i);

	do_tstamp = false;

	while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
		bool is_eof = !!tx_swbd->skb;

		if (unlikely(tx_swbd->check_wb)) {
			struct enetc_ndev_priv *priv = netdev_priv(ndev);
			union enetc_tx_bd *txbd;

			txbd = ENETC_TXBD(*tx_ring, i);

			if (txbd->flags & ENETC_TXBD_FLAGS_W &&
			    tx_swbd->do_tstamp) {
				enetc_get_tx_tstamp(&priv->si->hw, txbd,
						    &tstamp);
				do_tstamp = true;
			}
		}

		if (likely(tx_swbd->dma))
			enetc_unmap_tx_buff(tx_ring, tx_swbd);

		if (is_eof) {
			if (unlikely(do_tstamp)) {
				enetc_tstamp_tx(tx_swbd->skb, tstamp);
				do_tstamp = false;
			}
			napi_consume_skb(tx_swbd->skb, napi_budget);
			tx_swbd->skb = NULL;
		}

		tx_byte_cnt += tx_swbd->len;

		bds_to_clean--;
		tx_swbd++;
		i++;
		if (unlikely(i == tx_ring->bd_count)) {
			i = 0;
			tx_swbd = tx_ring->tx_swbd;
		}

		/* BD iteration loop end */
		if (is_eof) {
			tx_frm_cnt++;
			/* re-arm interrupt source */
			enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
					 BIT(16 + tx_ring->index));
		}

		if (unlikely(!bds_to_clean))
			bds_to_clean = enetc_bd_ready_count(tx_ring, i);
	}

	tx_ring->next_to_clean = i;
	tx_ring->stats.packets += tx_frm_cnt;
	tx_ring->stats.bytes += tx_byte_cnt;

	if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
		     __netif_subqueue_stopped(ndev, tx_ring->index) &&
		     (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
		netif_wake_subqueue(ndev, tx_ring->index);
	}

	return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
}

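/* Rx buffering uses whole pages mapped for DMA, each split into two
 * ENETC_RXB_TRUESIZE halves (assuming the usual 4K page); ENETC_RXB_PAD
 * keeps headroom in front of the frame for the skb built around the buffer.
 */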
static bool enetc_new_page(struct enetc_bdr *rx_ring,
			   struct enetc_rx_swbd *rx_swbd)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
		__free_page(page);

		return false;
	}

	rx_swbd->dma = addr;
	rx_swbd->page = page;
	rx_swbd->page_offset = ENETC_RXB_PAD;

	return true;
}

static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
{
	struct enetc_rx_swbd *rx_swbd;
	union enetc_rx_bd *rxbd;
	int i, j;

	i = rx_ring->next_to_use;
	rx_swbd = &rx_ring->rx_swbd[i];
	rxbd = enetc_rxbd(rx_ring, i);

	for (j = 0; j < buff_cnt; j++) {
		/* try reuse page */
		if (unlikely(!rx_swbd->page)) {
			if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
				rx_ring->stats.rx_alloc_errs++;
				break;
			}
		}

		/* update RxBD */
		rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
					   rx_swbd->page_offset);
		/* clear 'R' as well */
		rxbd->r.lstatus = 0;

		rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
		rx_swbd++;
		i++;
		if (unlikely(i == rx_ring->bd_count)) {
			i = 0;
			rx_swbd = rx_ring->rx_swbd;
		}
	}

	if (likely(j)) {
		rx_ring->next_to_alloc = i; /* keep track from page reuse */
		rx_ring->next_to_use = i;
	}

	return j;
}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static void enetc_get_rx_tstamp(struct net_device *ndev,
				union enetc_rx_bd *rxbd,
				struct sk_buff *skb)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 lo, hi, tstamp_lo;
	u64 tstamp;

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
		lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
		hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
		rxbd = enetc_rxbd_ext(rxbd);
		tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
		if (lo <= tstamp_lo)
			hi -= 1;

		tstamp = (u64)hi << 32 | tstamp_lo;
		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
		shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
	}
}
#endif

static void enetc_get_offloads(struct enetc_bdr *rx_ring,
			       union enetc_rx_bd *rxbd, struct sk_buff *skb)
{
	struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);

	/* TODO: hashing */
	if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
		u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);

		skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
		__be16 tpid = 0;

		switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
		case 0:
			tpid = htons(ETH_P_8021Q);
			break;
		case 1:
			tpid = htons(ETH_P_8021AD);
			break;
		case 2:
			tpid = htons(enetc_port_rd(&priv->si->hw,
						   ENETC_PCVLANR1));
			break;
		case 3:
			tpid = htons(enetc_port_rd(&priv->si->hw,
						   ENETC_PCVLANR2));
			break;
		default:
			break;
		}

		__vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
	}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (priv->active_offloads & ENETC_F_RX_TSTAMP)
		enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
#endif
}

static void enetc_process_skb(struct enetc_bdr *rx_ring,
			      struct sk_buff *skb)
{
	skb_record_rx_queue(skb, rx_ring->index);
	skb->protocol = eth_type_trans(skb, rx_ring->ndev);
}

static bool enetc_page_reusable(struct page *page)
{
	return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
}

static void enetc_reuse_page(struct enetc_bdr *rx_ring,
			     struct enetc_rx_swbd *old)
{
	struct enetc_rx_swbd *new;

	new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];

	/* next buf that may reuse a page */
	enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);

	/* copy page reference */
	*new = *old;
}

static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
					       int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

	dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
				      rx_swbd->page_offset,
				      size, DMA_FROM_DEVICE);
	return rx_swbd;
}

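/* Buffer recycling: if we own the only reference to the page, flip
 * page_offset to the other half, take an extra reference and hand the page
 * back to the ring at next_to_alloc; otherwise unmap it and let the network
 * stack free it.
 */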
static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
			      struct enetc_rx_swbd *rx_swbd)
{
	if (likely(enetc_page_reusable(rx_swbd->page))) {
		rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
		page_ref_inc(rx_swbd->page);

		enetc_reuse_page(rx_ring, rx_swbd);

		/* sync for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
						 rx_swbd->page_offset,
						 ENETC_RXB_DMA_SIZE,
						 DMA_FROM_DEVICE);
	} else {
		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
	}

	rx_swbd->page = NULL;
}

static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
						int i, u16 size)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
	struct sk_buff *skb;
	void *ba;

	ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
	skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
	if (unlikely(!skb)) {
		rx_ring->stats.rx_alloc_errs++;
		return NULL;
	}

	skb_reserve(skb, ENETC_RXB_PAD);
	__skb_put(skb, size);

	enetc_put_rx_buff(rx_ring, rx_swbd);

	return skb;
}

static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
				     u16 size, struct sk_buff *skb)
{
	struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);

	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
			rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);

	enetc_put_rx_buff(rx_ring, rx_swbd);
}

#define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */

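/* Rx cleanup walks the descriptor ring one frame at a time: the first BD is
 * turned into an skb via build_skb(), continuation BDs are attached as page
 * frags, and every ENETC_RXBD_BUNDLE processed BDs the ring is refilled and
 * the consumer index published to hardware.
 */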
static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
			       struct napi_struct *napi, int work_limit)
{
	int rx_frm_cnt = 0, rx_byte_cnt = 0;
	int cleaned_cnt, i;

	cleaned_cnt = enetc_bd_unused(rx_ring);
	/* next descriptor to process */
	i = rx_ring->next_to_clean;

	while (likely(rx_frm_cnt < work_limit)) {
		union enetc_rx_bd *rxbd;
		struct sk_buff *skb;
		u32 bd_status;
		u16 size;

		if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
			int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);

			/* update ENETC's consumer index */
			enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
			cleaned_cnt -= count;
		}

		rxbd = enetc_rxbd(rx_ring, i);
		bd_status = le32_to_cpu(rxbd->r.lstatus);
		if (!bd_status)
			break;

		enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
		dma_rmb(); /* for reading other rxbd fields */
		size = le16_to_cpu(rxbd->r.buf_len);
		skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
		if (!skb)
			break;

		enetc_get_offloads(rx_ring, rxbd, skb);

		cleaned_cnt++;

		rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
		if (unlikely(++i == rx_ring->bd_count))
			i = 0;

		if (unlikely(bd_status &
			     ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
			dev_kfree_skb(skb);
			while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
				dma_rmb();
				bd_status = le32_to_cpu(rxbd->r.lstatus);

				rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
				if (unlikely(++i == rx_ring->bd_count))
					i = 0;
			}

			rx_ring->ndev->stats.rx_dropped++;
			rx_ring->ndev->stats.rx_errors++;

			break;
		}

		/* not last BD in frame? */
		while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
			bd_status = le32_to_cpu(rxbd->r.lstatus);
			size = ENETC_RXB_DMA_SIZE;

			if (bd_status & ENETC_RXBD_LSTATUS_F) {
				dma_rmb();
				size = le16_to_cpu(rxbd->r.buf_len);
			}

			enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);

			cleaned_cnt++;

			rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
			if (unlikely(++i == rx_ring->bd_count))
				i = 0;
		}

		rx_byte_cnt += skb->len;

		enetc_process_skb(rx_ring, skb);

		napi_gro_receive(napi, skb);

		rx_frm_cnt++;
	}

	rx_ring->next_to_clean = i;

	rx_ring->stats.packets += rx_frm_cnt;
	rx_ring->stats.bytes += rx_byte_cnt;

	return rx_frm_cnt;
}

/* Probing and Init */
#define ENETC_MAX_RFS_SIZE 64
void enetc_get_si_caps(struct enetc_si *si)
{
	struct enetc_hw *hw = &si->hw;
	u32 val;

	/* find out how many of various resources we have to work with */
	val = enetc_rd(hw, ENETC_SICAPR0);
	si->num_rx_rings = (val >> 16) & 0xff;
	si->num_tx_rings = val & 0xff;

	val = enetc_rd(hw, ENETC_SIRFSCAPR);
	si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
	si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);

	si->num_rss = 0;
	val = enetc_rd(hw, ENETC_SIPCAPR0);
	if (val & ENETC_SIPCAPR0_RSS) {
		u32 rss;

		rss = enetc_rd(hw, ENETC_SIRSSCAPR);
		si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
	}

	if (val & ENETC_SIPCAPR0_QBV)
		si->hw_features |= ENETC_SI_F_QBV;

	if (val & ENETC_SIPCAPR0_PSFP)
		si->hw_features |= ENETC_SI_F_PSFP;
}

static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
{
	r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
					&r->bd_dma_base, GFP_KERNEL);
	if (!r->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(r->bd_dma_base, 128)) {
		dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
				  r->bd_dma_base);
		return -EINVAL;
	}

	return 0;
}

static int enetc_alloc_txbdr(struct enetc_bdr *txr)
{
	int err;

	txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
	if (!txr->tx_swbd)
		return -ENOMEM;

	err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
	if (err) {
		vfree(txr->tx_swbd);
		return err;
	}

	txr->next_to_clean = 0;
	txr->next_to_use = 0;

	return 0;
}

static void enetc_free_txbdr(struct enetc_bdr *txr)
{
	int size, i;

	for (i = 0; i < txr->bd_count; i++)
		enetc_free_tx_skb(txr, &txr->tx_swbd[i]);

	size = txr->bd_count * sizeof(union enetc_tx_bd);

	dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
	txr->bd_base = NULL;

	vfree(txr->tx_swbd);
	txr->tx_swbd = NULL;
}

static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
{
	int i, err;

	for (i = 0; i < priv->num_tx_rings; i++) {
		err = enetc_alloc_txbdr(priv->tx_ring[i]);

		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i-- > 0)
		enetc_free_txbdr(priv->tx_ring[i]);

	return err;
}

static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_free_txbdr(priv->tx_ring[i]);
}

static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
{
	size_t size = sizeof(union enetc_rx_bd);
	int err;

	rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
	if (!rxr->rx_swbd)
		return -ENOMEM;

	if (extended)
		size *= 2;

	err = enetc_dma_alloc_bdr(rxr, size);
	if (err) {
		vfree(rxr->rx_swbd);
		return err;
	}

	rxr->next_to_clean = 0;
	rxr->next_to_use = 0;
	rxr->next_to_alloc = 0;
	rxr->ext_en = extended;

	return 0;
}

static void enetc_free_rxbdr(struct enetc_bdr *rxr)
{
	int size;

	size = rxr->bd_count * sizeof(union enetc_rx_bd);

	dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
	rxr->bd_base = NULL;

	vfree(rxr->rx_swbd);
	rxr->rx_swbd = NULL;
}

static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
{
	bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
	int i, err;

	for (i = 0; i < priv->num_rx_rings; i++) {
		err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);

		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i-- > 0)
		enetc_free_rxbdr(priv->rx_ring[i]);

	return err;
}

static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_free_rxbdr(priv->rx_ring[i]);
}

static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
{
	int i;

	if (!tx_ring->tx_swbd)
		return;

	for (i = 0; i < tx_ring->bd_count; i++) {
		struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];

		enetc_free_tx_skb(tx_ring, tx_swbd);
	}

	tx_ring->next_to_clean = 0;
	tx_ring->next_to_use = 0;
}

static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
{
	int i;

	if (!rx_ring->rx_swbd)
		return;

	for (i = 0; i < rx_ring->bd_count; i++) {
		struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];

		if (!rx_swbd->page)
			continue;

		dma_unmap_page(rx_ring->dev, rx_swbd->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rx_swbd->page);
		rx_swbd->page = NULL;
	}

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	rx_ring->next_to_alloc = 0;
}

static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_free_rx_ring(priv->rx_ring[i]);

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_free_tx_ring(priv->tx_ring[i]);
}

int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
					   GFP_KERNEL);
	if (!cbdr->bd_base)
		return -ENOMEM;

	/* h/w requires 128B alignment */
	if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
		dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
		return -EINVAL;
	}

	cbdr->next_to_clean = 0;
	cbdr->next_to_use = 0;

	return 0;
}

void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
{
	int size = cbdr->bd_count * sizeof(struct enetc_cbd);

	dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
	cbdr->bd_base = NULL;
}

void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
{
	/* set CBDR cache attributes */
	enetc_wr(hw, ENETC_SICAR2,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);

	enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
	enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));

	enetc_wr(hw, ENETC_SICBDRPIR, 0);
	enetc_wr(hw, ENETC_SICBDRCIR, 0);

	/* enable ring */
	enetc_wr(hw, ENETC_SICBDRMR, BIT(31));

	cbdr->pir = hw->reg + ENETC_SICBDRPIR;
	cbdr->cir = hw->reg + ENETC_SICBDRCIR;
}

void enetc_clear_cbdr(struct enetc_hw *hw)
{
	enetc_wr(hw, ENETC_SICBDRMR, 0);
}

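/* Illustrative default RSS spread: with num_groups = 4 Rx rings and a
 * 64-entry table, rss_table[] becomes 0,1,2,3,0,1,2,3,... so hash buckets
 * are distributed round-robin across the rings.
 */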
static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
{
	int *rss_table;
	int i;

	rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
	if (!rss_table)
		return -ENOMEM;

	/* Set up RSS table defaults */
	for (i = 0; i < si->num_rss; i++)
		rss_table[i] = i % num_groups;

	enetc_set_rss_table(si, rss_table, si->num_rss);

	kfree(rss_table);

	return 0;
}

int enetc_configure_si(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	struct enetc_hw *hw = &si->hw;
	int err;

	/* set SI cache attributes */
	enetc_wr(hw, ENETC_SICAR0,
		 ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
	enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
	/* enable SI */
	enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);

	if (si->num_rss) {
		err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
		if (err)
			return err;
	}

	return 0;
}

void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	int cpus = num_online_cpus();

	priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
	priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;

	/* Enable all available TX rings in order to configure as many
	 * priorities as possible, when needed.
	 * TODO: Make # of TX rings run-time configurable
	 */
	priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
	priv->num_tx_rings = si->num_tx_rings;
	priv->bdr_int_num = cpus;
	priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
	priv->tx_ictt = ENETC_TXIC_TIMETHR;

	/* SI specific */
	si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
}

int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;
	int err;

	err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
	if (err)
		return err;

	enetc_setup_cbdr(&si->hw, &si->cbd_ring);

	priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
				  GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto err_alloc_cls;
	}

	return 0;

err_alloc_cls:
	enetc_clear_cbdr(&si->hw);
	enetc_free_cbdr(priv->dev, &si->cbd_ring);

	return err;
}

void enetc_free_si_resources(struct enetc_ndev_priv *priv)
{
	struct enetc_si *si = priv->si;

	enetc_clear_cbdr(&si->hw);
	enetc_free_cbdr(priv->dev, &si->cbd_ring);

	kfree(priv->cls_rules);
}

static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int idx = tx_ring->index;
	u32 tbmr;

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
		       lower_32_bits(tx_ring->bd_dma_base));

	enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
		       upper_32_bits(tx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
		       ENETC_RTBLENR_LEN(tx_ring->bd_count));

	/* clearing PI/CI registers for Tx not supported, adjust sw indexes */
	tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
	tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);

	/* enable Tx ints by setting pkt thr to 1 */
	enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);

	tbmr = ENETC_TBMR_EN;
	if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tbmr |= ENETC_TBMR_VIH;

	/* enable ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);

	tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
	tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
	tx_ring->idr = hw->reg + ENETC_SITXIDR;
}

static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;
	u32 rbmr;

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
		       lower_32_bits(rx_ring->bd_dma_base));

	enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
		       upper_32_bits(rx_ring->bd_dma_base));

	WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
		       ENETC_RTBLENR_LEN(rx_ring->bd_count));

	enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);

	enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);

	/* enable Rx ints by setting pkt thr to 1 */
	enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);

	rbmr = ENETC_RBMR_EN;

	if (rx_ring->ext_en)
		rbmr |= ENETC_RBMR_BDS;

	if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rbmr |= ENETC_RBMR_VTE;

	rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
	rx_ring->idr = hw->reg + ENETC_SIRXIDR;

	enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
	/* update ENETC's consumer index */
	enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);

	/* enable ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
}

static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
}

static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
{
	int idx = rx_ring->index;

	/* disable EN bit on ring */
	enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
}

static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
{
	int delay = 8, timeout = 100;
	int idx = tx_ring->index;

	/* disable EN bit on ring */
	enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);

	/* wait for busy to clear */
	while (delay < timeout &&
	       enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
		msleep(delay);
		delay *= 2;
	}

	if (delay >= timeout)
		netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
			    idx);
}

static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);

	udelay(1);
}

static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	cpumask_t cpu_mask;
	int i, j, err;

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
		struct enetc_int_vector *v = priv->int_vector[i];
		int entry = ENETC_BDR_INT_BASE_IDX + i;
		struct enetc_hw *hw = &priv->si->hw;

		snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
			 priv->ndev->name, i);
		err = request_irq(irq, enetc_msix, 0, v->name, v);
		if (err) {
			dev_err(priv->dev, "request_irq() failed!\n");
			goto irq_err;
		}

		v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
		v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
		v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);

		enetc_wr(hw, ENETC_SIMSIRRV(i), entry);

		for (j = 0; j < v->count_tx_rings; j++) {
			int idx = v->tx_ring[j].index;

			enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
		}
		cpumask_clear(&cpu_mask);
		cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
		irq_set_affinity_hint(irq, &cpu_mask);
	}

	return 0;

irq_err:
	while (i--) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, priv->int_vector[i]);
	}

	return err;
}

static void enetc_free_irqs(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	int i;

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);

		irq_set_affinity_hint(irq, NULL);
		free_irq(irq, priv->int_vector[i]);
	}
}

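/* Interrupt coalescing setup: ICPT is the packet-count threshold and ICTT
 * the timer threshold for raising an interrupt.  In adaptive Rx mode the
 * timer starts at a non-zero minimum and DIM (enetc_rx_dim_work) retunes it
 * at runtime; manual Tx mode uses the fixed priv->tx_ictt.
 */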
static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
{
	struct enetc_hw *hw = &priv->si->hw;
	u32 icpt, ictt;
	int i;

	/* enable Tx & Rx event indication */
	if (priv->ic_mode &
	    (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
		icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
		/* init to non-0 minimum, will be adjusted later */
		ictt = 0x1;
	} else {
		icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
		ictt = 0;
	}

	for (i = 0; i < priv->num_rx_rings; i++) {
		enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
		enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
		enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
	}

	if (priv->ic_mode & ENETC_IC_TX_MANUAL)
		icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
	else
		icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */

	for (i = 0; i < priv->num_tx_rings; i++) {
		enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
		enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
		enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
	}
}

static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
}

static int enetc_phylink_connect(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct ethtool_eee edata;
	int err;

	if (!priv->phylink)
		return 0; /* phy-less mode */

	err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
	if (err) {
		dev_err(&ndev->dev, "could not attach to PHY\n");
		return err;
	}

	/* disable EEE autoneg, until ENETC driver supports it */
	memset(&edata, 0, sizeof(struct ethtool_eee));
	phylink_ethtool_set_eee(priv->phylink, &edata);

	return 0;
}

void enetc_start(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	enetc_setup_interrupts(priv);

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(priv->si->pdev,
					 ENETC_BDR_INT_BASE_IDX + i);

		napi_enable(&priv->int_vector[i]->napi);
		enable_irq(irq);
	}

	if (priv->phylink)
		phylink_start(priv->phylink);
	else
		netif_carrier_on(ndev);

	netif_tx_start_all_queues(ndev);
}

int enetc_open(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	err = enetc_setup_irqs(priv);
	if (err)
		return err;

	err = enetc_phylink_connect(ndev);
	if (err)
		goto err_phy_connect;

	err = enetc_alloc_tx_resources(priv);
	if (err)
		goto err_alloc_tx;

	err = enetc_alloc_rx_resources(priv);
	if (err)
		goto err_alloc_rx;

	err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
	if (err)
		goto err_set_queues;

	enetc_setup_bdrs(priv);
	enetc_start(ndev);

	return 0;

err_set_queues:
	enetc_free_rx_resources(priv);
err_alloc_rx:
	enetc_free_tx_resources(priv);
err_alloc_tx:
	if (priv->phylink)
		phylink_disconnect_phy(priv->phylink);
err_phy_connect:
	enetc_free_irqs(priv);

	return err;
}

void enetc_stop(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	netif_tx_stop_all_queues(ndev);

	for (i = 0; i < priv->bdr_int_num; i++) {
		int irq = pci_irq_vector(priv->si->pdev,
					 ENETC_BDR_INT_BASE_IDX + i);

		disable_irq(irq);
		napi_synchronize(&priv->int_vector[i]->napi);
		napi_disable(&priv->int_vector[i]->napi);
	}

	if (priv->phylink)
		phylink_stop(priv->phylink);
	else
		netif_carrier_off(ndev);

	enetc_clear_interrupts(priv);
}

int enetc_close(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);

	enetc_stop(ndev);
	enetc_clear_bdrs(priv);

	if (priv->phylink)
		phylink_disconnect_phy(priv->phylink);
	enetc_free_rxtx_rings(priv);
	enetc_free_rx_resources(priv);
	enetc_free_tx_resources(priv);
	enetc_free_irqs(priv);

	return 0;
}

static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct tc_mqprio_qopt *mqprio = type_data;
	struct enetc_bdr *tx_ring;
	u8 num_tc;
	int i;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_tc = mqprio->num_tc;

	if (!num_tc) {
		netdev_reset_tc(ndev);
		netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);

		/* Reset all ring priorities to 0 */
		for (i = 0; i < priv->num_tx_rings; i++) {
			tx_ring = priv->tx_ring[i];
			enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
		}

		return 0;
	}

	/* Check if we have enough BD rings available to accommodate all TCs */
	if (num_tc > priv->num_tx_rings) {
		netdev_err(ndev, "Max %d traffic classes supported\n",
			   priv->num_tx_rings);
		return -EINVAL;
	}

	/* For the moment, we use only one BD ring per TC.
	 *
	 * Configure num_tc BD rings with increasing priorities.
	 */
	for (i = 0; i < num_tc; i++) {
		tx_ring = priv->tx_ring[i];
		enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
	}

	/* Reset the number of netdev queues based on the TC count */
	netif_set_real_num_tx_queues(ndev, num_tc);

	netdev_set_num_tc(ndev, num_tc);

	/* Each TC is associated with one netdev queue */
	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(ndev, i, 1, i);

	return 0;
}

int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		   void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_MQPRIO:
		return enetc_setup_tc_mqprio(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return enetc_setup_tc_taprio(ndev, type_data);
	case TC_SETUP_QDISC_CBS:
		return enetc_setup_tc_cbs(ndev, type_data);
	case TC_SETUP_QDISC_ETF:
		return enetc_setup_tc_txtime(ndev, type_data);
	case TC_SETUP_BLOCK:
		return enetc_setup_tc_psfp(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

struct net_device_stats *enetc_get_stats(struct net_device *ndev)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	unsigned long packets = 0, bytes = 0;
	int i;

	for (i = 0; i < priv->num_rx_rings; i++) {
		packets += priv->rx_ring[i]->stats.packets;
		bytes += priv->rx_ring[i]->stats.bytes;
	}

	stats->rx_packets = packets;
	stats->rx_bytes = bytes;
	bytes = 0;
	packets = 0;

	for (i = 0; i < priv->num_tx_rings; i++) {
		packets += priv->tx_ring[i]->stats.packets;
		bytes += priv->tx_ring[i]->stats.bytes;
	}

	stats->tx_packets = packets;
	stats->tx_bytes = bytes;

	return stats;
}

static int enetc_set_rss(struct net_device *ndev, int en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct enetc_hw *hw = &priv->si->hw;
	u32 reg;

	enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);

	reg = enetc_rd(hw, ENETC_SIMR);
	reg &= ~ENETC_SIMR_RSSE;
	reg |= (en) ? ENETC_SIMR_RSSE : 0;
	enetc_wr(hw, ENETC_SIMR, reg);

	return 0;
}

static int enetc_set_psfp(struct net_device *ndev, int en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int err;

	if (en) {
		err = enetc_psfp_enable(priv);
		if (err)
			return err;

		priv->active_offloads |= ENETC_F_QCI;
		return 0;
	}

	err = enetc_psfp_disable(priv);
	if (err)
		return err;

	priv->active_offloads &= ~ENETC_F_QCI;

	return 0;
}

static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < priv->num_rx_rings; i++)
		enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
}

static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	int i;

	for (i = 0; i < priv->num_tx_rings; i++)
		enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
}

int enetc_set_features(struct net_device *ndev,
		       netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;
	int err = 0;

	if (changed & NETIF_F_RXHASH)
		enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		enetc_enable_rxvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_RX));

	if (changed & NETIF_F_HW_VLAN_CTAG_TX)
		enetc_enable_txvlan(ndev,
				    !!(features & NETIF_F_HW_VLAN_CTAG_TX));

	if (changed & NETIF_F_HW_TC)
		err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));

	return err;
}

#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct hwtstamp_config config;
	int ao;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
		break;
	case HWTSTAMP_TX_ON:
		priv->active_offloads |= ENETC_F_TX_TSTAMP;
		break;
	default:
		return -ERANGE;
	}

	ao = priv->active_offloads;
	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
		break;
	default:
		priv->active_offloads |= ENETC_F_RX_TSTAMP;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	if (netif_running(ndev) && ao != priv->active_offloads) {
		enetc_close(ndev);
		enetc_open(ndev);
	}

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}

static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
	struct hwtstamp_config config;

	config.flags = 0;

	if (priv->active_offloads & ENETC_F_TX_TSTAMP)
		config.tx_type = HWTSTAMP_TX_ON;
	else
		config.tx_type = HWTSTAMP_TX_OFF;

	config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;
}
#endif

int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
	struct enetc_ndev_priv *priv = netdev_priv(ndev);
#ifdef CONFIG_FSL_ENETC_PTP_CLOCK
	if (cmd == SIOCSHWTSTAMP)
		return enetc_hwtstamp_set(ndev, rq);
	if (cmd == SIOCGHWTSTAMP)
		return enetc_hwtstamp_get(ndev, rq);
#endif

	if (!priv->phylink)
		return -EOPNOTSUPP;

	return phylink_mii_ioctl(priv->phylink, rq, cmd);
}

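/* Vector/ring layout (illustrative, assuming ENETC_MAX_BDR_INT means two
 * vectors): with 8 Tx rings, vector 0 gets rings 0,2,4,6 and vector 1 gets
 * 1,3,5,7 (idx = 2 * j + i); otherwise rings are assigned in contiguous
 * blocks (idx = j + i * v_tx_rings).  Each vector owns exactly one Rx ring.
 */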
int enetc_alloc_msix(struct enetc_ndev_priv *priv)
{
	struct pci_dev *pdev = priv->si->pdev;
	int v_tx_rings;
	int i, n, err, nvec;

	nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
	/* allocate MSIX for both messaging and Rx/Tx interrupts */
	n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);

	if (n < 0)
		return n;

	if (n != nvec)
		return -EPERM;

	/* # of tx rings per int vector */
	v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v;
		struct enetc_bdr *bdr;
		int j;

		v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
		if (!v) {
			err = -ENOMEM;
			goto fail;
		}

		priv->int_vector[i] = v;

		/* init defaults for adaptive IC */
		if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
			v->rx_ictt = 0x1;
			v->rx_dim_en = true;
		}
		INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
		netif_napi_add(priv->ndev, &v->napi, enetc_poll,
			       NAPI_POLL_WEIGHT);
		v->count_tx_rings = v_tx_rings;

		for (j = 0; j < v_tx_rings; j++) {
			int idx;

			/* default tx ring mapping policy */
			if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
				idx = 2 * j + i; /* 2 CPUs */
			else
				idx = j + i * v_tx_rings; /* default */

			__set_bit(idx, &v->tx_rings_map);
			bdr = &v->tx_ring[j];
			bdr->index = idx;
			bdr->ndev = priv->ndev;
			bdr->dev = priv->dev;
			bdr->bd_count = priv->tx_bd_count;
			priv->tx_ring[idx] = bdr;
		}

		bdr = &v->rx_ring;
		bdr->index = i;
		bdr->ndev = priv->ndev;
		bdr->dev = priv->dev;
		bdr->bd_count = priv->rx_bd_count;
		priv->rx_ring[i] = bdr;
	}

	return 0;

fail:
	while (i--) {
		netif_napi_del(&priv->int_vector[i]->napi);
		cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
		kfree(priv->int_vector[i]);
	}

	pci_free_irq_vectors(pdev);

	return err;
}

void enetc_free_msix(struct enetc_ndev_priv *priv)
{
	int i;

	for (i = 0; i < priv->bdr_int_num; i++) {
		struct enetc_int_vector *v = priv->int_vector[i];

		netif_napi_del(&v->napi);
		cancel_work_sync(&v->rx_dim.work);
	}

	for (i = 0; i < priv->num_rx_rings; i++)
		priv->rx_ring[i] = NULL;

	for (i = 0; i < priv->num_tx_rings; i++)
		priv->tx_ring[i] = NULL;

	for (i = 0; i < priv->bdr_int_num; i++) {
		kfree(priv->int_vector[i]);
		priv->int_vector[i] = NULL;
	}

	/* disable all MSIX for this device */
	pci_free_irq_vectors(priv->si->pdev);
}

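/* enetc_pci_probe() over-allocates by ENETC_SI_ALIGN - 1 and aligns the
 * returned pointer, storing the offset in si->pad; enetc_kfree_si() must
 * therefore step back by si->pad to recover the pointer kzalloc returned.
 */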
static void enetc_kfree_si(struct enetc_si *si)
{
	char *p = (char *)si - si->pad;

	kfree(p);
}

static void enetc_detect_errata(struct enetc_si *si)
{
	if (si->pdev->revision == ENETC_REV1)
		si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
}

int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
{
	struct enetc_si *si, *p;
	struct enetc_hw *hw;
	size_t alloc_size;
	int err, len;

	pcie_flr(pdev);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "device enable failed\n");
		return err;
	}

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed err=%d\n", err);
		goto err_pci_mem_reg;
	}

	pci_set_master(pdev);

	alloc_size = sizeof(struct enetc_si);
	if (sizeof_priv) {
		/* align priv to 32B */
		alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
		alloc_size += sizeof_priv;
	}
	/* force 32B alignment for enetc_si */
	alloc_size += ENETC_SI_ALIGN - 1;

	p = kzalloc(alloc_size, GFP_KERNEL);
	if (!p) {
		err = -ENOMEM;
		goto err_alloc_si;
	}

	si = PTR_ALIGN(p, ENETC_SI_ALIGN);
	si->pad = (char *)si - (char *)p;

	pci_set_drvdata(pdev, si);
	si->pdev = pdev;
	hw = &si->hw;

	len = pci_resource_len(pdev, ENETC_BAR_REGS);
	hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
	if (!hw->reg) {
		err = -ENXIO;
		dev_err(&pdev->dev, "ioremap() failed\n");
		goto err_ioremap;
	}
	if (len > ENETC_PORT_BASE)
		hw->port = hw->reg + ENETC_PORT_BASE;
	if (len > ENETC_GLOBAL_BASE)
		hw->global = hw->reg + ENETC_GLOBAL_BASE;

	enetc_detect_errata(si);

	return 0;

err_ioremap:
	enetc_kfree_si(si);
err_alloc_si:
	pci_release_mem_regions(pdev);
err_pci_mem_reg:
err_dma:
	pci_disable_device(pdev);

	return err;
}

void enetc_pci_remove(struct pci_dev *pdev)
{
	struct enetc_si *si = pci_get_drvdata(pdev);
	struct enetc_hw *hw = &si->hw;

	iounmap(hw->reg);
	enetc_kfree_si(si);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}