drivers/net/ethernet/freescale/enetc/enetc.c
1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2017-2019 NXP */
3
4 #include "enetc.h"
5 #include <linux/tcp.h>
6 #include <linux/udp.h>
7 #include <linux/vmalloc.h>
8
9 /* ENETC overhead: optional extension BD + 1 BD gap */
10 #define ENETC_TXBDS_NEEDED(val) ((val) + 2)
11 /* max # of chained Tx BDs is 15, including head and extension BD */
12 #define ENETC_MAX_SKB_FRAGS     13
13 #define ENETC_TXBDS_MAX_NEEDED  ENETC_TXBDS_NEEDED(ENETC_MAX_SKB_FRAGS + 1)
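/*
 * Worst case for one frame, as implied by the macros above: 1 head BD +
 * ENETC_MAX_SKB_FRAGS (13) fragment BDs = 14, plus the optional extension
 * BD and the 1 BD gap added by ENETC_TXBDS_NEEDED(), giving
 * ENETC_TXBDS_MAX_NEEDED = 16.
 */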
14
15 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
16                               int active_offloads);
17
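/* Main ndo_start_xmit handler: linearize the skb if it has more fragments
 * than ENETC_MAX_SKB_FRAGS, stop the subqueue and return NETDEV_TX_BUSY if
 * the ring does not have enough free BDs, then map the buffers to BDs under
 * the MDIO lock. The subqueue is also stopped once fewer than
 * ENETC_TXBDS_MAX_NEEDED BDs remain; it is woken again from the Tx
 * completion path once enough BDs have been reclaimed.
 */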
18 netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev)
19 {
20         struct enetc_ndev_priv *priv = netdev_priv(ndev);
21         struct enetc_bdr *tx_ring;
22         int count;
23
24         tx_ring = priv->tx_ring[skb->queue_mapping];
25
26         if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
27                 if (unlikely(skb_linearize(skb)))
28                         goto drop_packet_err;
29
30         count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
31         if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
32                 netif_stop_subqueue(ndev, tx_ring->index);
33                 return NETDEV_TX_BUSY;
34         }
35
36         enetc_lock_mdio();
37         count = enetc_map_tx_buffs(tx_ring, skb, priv->active_offloads);
38         enetc_unlock_mdio();
39
40         if (unlikely(!count))
41                 goto drop_packet_err;
42
43         if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_MAX_NEEDED)
44                 netif_stop_subqueue(ndev, tx_ring->index);
45
46         return NETDEV_TX_OK;
47
48 drop_packet_err:
49         dev_kfree_skb_any(skb);
50         return NETDEV_TX_OK;
51 }
52
53 static void enetc_unmap_tx_buff(struct enetc_bdr *tx_ring,
54                                 struct enetc_tx_swbd *tx_swbd)
55 {
56         if (tx_swbd->is_dma_page)
57                 dma_unmap_page(tx_ring->dev, tx_swbd->dma,
58                                tx_swbd->len, DMA_TO_DEVICE);
59         else
60                 dma_unmap_single(tx_ring->dev, tx_swbd->dma,
61                                  tx_swbd->len, DMA_TO_DEVICE);
62         tx_swbd->dma = 0;
63 }
64
65 static void enetc_free_tx_skb(struct enetc_bdr *tx_ring,
66                               struct enetc_tx_swbd *tx_swbd)
67 {
68         if (tx_swbd->dma)
69                 enetc_unmap_tx_buff(tx_ring, tx_swbd);
70
71         if (tx_swbd->skb) {
72                 dev_kfree_skb_any(tx_swbd->skb);
73                 tx_swbd->skb = NULL;
74         }
75 }
76
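/* Map one skb to a chain of Tx BDs: the linear part goes into the first BD,
 * an optional extension BD is added when VLAN insertion or a two-step PTP
 * timestamp is requested, and each page fragment gets its own BD. The last
 * BD gets the 'F' (final) flag, the producer index register (tpir) is
 * written to hand the chain to hardware, and the number of BDs used is
 * returned. On a DMA mapping error all BDs mapped so far are unwound and 0
 * is returned.
 */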
77 static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb,
78                               int active_offloads)
79 {
80         struct enetc_tx_swbd *tx_swbd;
81         skb_frag_t *frag;
82         int len = skb_headlen(skb);
83         union enetc_tx_bd temp_bd;
84         union enetc_tx_bd *txbd;
85         bool do_vlan, do_tstamp;
86         int i, count = 0;
87         unsigned int f;
88         dma_addr_t dma;
89         u8 flags = 0;
90
91         i = tx_ring->next_to_use;
92         txbd = ENETC_TXBD(*tx_ring, i);
93         prefetchw(txbd);
94
95         dma = dma_map_single(tx_ring->dev, skb->data, len, DMA_TO_DEVICE);
96         if (unlikely(dma_mapping_error(tx_ring->dev, dma)))
97                 goto dma_err;
98
99         temp_bd.addr = cpu_to_le64(dma);
100         temp_bd.buf_len = cpu_to_le16(len);
101         temp_bd.lstatus = 0;
102
103         tx_swbd = &tx_ring->tx_swbd[i];
104         tx_swbd->dma = dma;
105         tx_swbd->len = len;
106         tx_swbd->is_dma_page = 0;
107         count++;
108
109         do_vlan = skb_vlan_tag_present(skb);
110         do_tstamp = (active_offloads & ENETC_F_TX_TSTAMP) &&
111                     (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP);
112         tx_swbd->do_tstamp = do_tstamp;
113         tx_swbd->check_wb = tx_swbd->do_tstamp;
114
115         if (do_vlan || do_tstamp)
116                 flags |= ENETC_TXBD_FLAGS_EX;
117
118         if (tx_ring->tsd_enable)
119                 flags |= ENETC_TXBD_FLAGS_TSE | ENETC_TXBD_FLAGS_TXSTART;
120
121         /* first BD needs frm_len and offload flags set */
122         temp_bd.frm_len = cpu_to_le16(skb->len);
123         temp_bd.flags = flags;
124
125         if (flags & ENETC_TXBD_FLAGS_TSE)
126                 temp_bd.txstart = enetc_txbd_set_tx_start(skb->skb_mstamp_ns,
127                                                           flags);
128
129         if (flags & ENETC_TXBD_FLAGS_EX) {
130                 u8 e_flags = 0;
131                 *txbd = temp_bd;
132                 enetc_clear_tx_bd(&temp_bd);
133
134                 /* add extension BD for VLAN and/or timestamping */
135                 flags = 0;
136                 tx_swbd++;
137                 txbd++;
138                 i++;
139                 if (unlikely(i == tx_ring->bd_count)) {
140                         i = 0;
141                         tx_swbd = tx_ring->tx_swbd;
142                         txbd = ENETC_TXBD(*tx_ring, 0);
143                 }
144                 prefetchw(txbd);
145
146                 if (do_vlan) {
147                         temp_bd.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
148                         temp_bd.ext.tpid = 0; /* < C-TAG */
149                         e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
150                 }
151
152                 if (do_tstamp) {
153                         skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
154                         e_flags |= ENETC_TXBD_E_FLAGS_TWO_STEP_PTP;
155                 }
156
157                 temp_bd.ext.e_flags = e_flags;
158                 count++;
159         }
160
161         frag = &skb_shinfo(skb)->frags[0];
162         for (f = 0; f < skb_shinfo(skb)->nr_frags; f++, frag++) {
163                 len = skb_frag_size(frag);
164                 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, len,
165                                        DMA_TO_DEVICE);
166                 if (dma_mapping_error(tx_ring->dev, dma))
167                         goto dma_err;
168
169                 *txbd = temp_bd;
170                 enetc_clear_tx_bd(&temp_bd);
171
172                 flags = 0;
173                 tx_swbd++;
174                 txbd++;
175                 i++;
176                 if (unlikely(i == tx_ring->bd_count)) {
177                         i = 0;
178                         tx_swbd = tx_ring->tx_swbd;
179                         txbd = ENETC_TXBD(*tx_ring, 0);
180                 }
181                 prefetchw(txbd);
182
183                 temp_bd.addr = cpu_to_le64(dma);
184                 temp_bd.buf_len = cpu_to_le16(len);
185
186                 tx_swbd->dma = dma;
187                 tx_swbd->len = len;
188                 tx_swbd->is_dma_page = 1;
189                 count++;
190         }
191
192         /* last BD needs 'F' bit set */
193         flags |= ENETC_TXBD_FLAGS_F;
194         temp_bd.flags = flags;
195         *txbd = temp_bd;
196
197         tx_ring->tx_swbd[i].skb = skb;
198
199         enetc_bdr_idx_inc(tx_ring, &i);
200         tx_ring->next_to_use = i;
201
202         skb_tx_timestamp(skb);
203
204         /* let H/W know BD ring has been updated */
205         enetc_wr_reg_hot(tx_ring->tpir, i); /* includes wmb() */
206
207         return count;
208
209 dma_err:
210         dev_err(tx_ring->dev, "DMA map error\n");
211
212         do {
213                 tx_swbd = &tx_ring->tx_swbd[i];
214                 enetc_free_tx_skb(tx_ring, tx_swbd);
215                 if (i == 0)
216                         i = tx_ring->bd_count;
217                 i--;
218         } while (count--);
219
220         return 0;
221 }
222
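/* MSI-X handler for one Rx/Tx interrupt vector: mask the Rx and Tx BD ring
 * interrupts belonging to this vector (the Rx coalescing time threshold is
 * refreshed via ricr1) and defer all actual work to NAPI.
 */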
223 static irqreturn_t enetc_msix(int irq, void *data)
224 {
225         struct enetc_int_vector *v = data;
226         int i;
227
228         enetc_lock_mdio();
229
230         /* disable interrupts */
231         enetc_wr_reg_hot(v->rbier, 0);
232         enetc_wr_reg_hot(v->ricr1, v->rx_ictt);
233
234         for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
235                 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i), 0);
236
237         enetc_unlock_mdio();
238
239         napi_schedule(&v->napi);
240
241         return IRQ_HANDLED;
242 }
243
244 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget);
245 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
246                                struct napi_struct *napi, int work_limit);
247
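/* Adaptive Rx interrupt moderation (DIM): the work handler converts the
 * moderation profile chosen by net_dim into a cycle count for the Rx
 * coalescing timer, while enetc_rx_net_dim() feeds packet/byte samples to
 * the DIM algorithm from the NAPI poll loop.
 */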
248 static void enetc_rx_dim_work(struct work_struct *w)
249 {
250         struct dim *dim = container_of(w, struct dim, work);
251         struct dim_cq_moder moder =
252                 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
253         struct enetc_int_vector *v =
254                 container_of(dim, struct enetc_int_vector, rx_dim);
255
256         v->rx_ictt = enetc_usecs_to_cycles(moder.usec);
257         dim->state = DIM_START_MEASURE;
258 }
259
260 static void enetc_rx_net_dim(struct enetc_int_vector *v)
261 {
262         struct dim_sample dim_sample;
263
264         v->comp_cnt++;
265
266         if (!v->rx_napi_work)
267                 return;
268
269         dim_update_sample(v->comp_cnt,
270                           v->rx_ring.stats.packets,
271                           v->rx_ring.stats.bytes,
272                           &dim_sample);
273         net_dim(&v->rx_dim, dim_sample);
274 }
275
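/* NAPI poll: clean all Tx rings owned by this vector, then process up to
 * 'budget' Rx frames. If either side still has work left, the full budget
 * is returned so NAPI keeps polling; otherwise the poll is completed,
 * adaptive Rx coalescing (DIM) is updated when enabled, and the per-ring
 * interrupts are re-enabled.
 */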
276 static int enetc_poll(struct napi_struct *napi, int budget)
277 {
278         struct enetc_int_vector
279                 *v = container_of(napi, struct enetc_int_vector, napi);
280         bool complete = true;
281         int work_done;
282         int i;
283
284         enetc_lock_mdio();
285
286         for (i = 0; i < v->count_tx_rings; i++)
287                 if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
288                         complete = false;
289
290         work_done = enetc_clean_rx_ring(&v->rx_ring, napi, budget);
291         if (work_done == budget)
292                 complete = false;
293         if (work_done)
294                 v->rx_napi_work = true;
295
296         if (!complete) {
297                 enetc_unlock_mdio();
298                 return budget;
299         }
300
301         napi_complete_done(napi, work_done);
302
303         if (likely(v->rx_dim_en))
304                 enetc_rx_net_dim(v);
305
306         v->rx_napi_work = false;
307
308         /* enable interrupts */
309         enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
310
311         for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
312                 enetc_wr_reg_hot(v->tbier_base + ENETC_BDR_OFF(i),
313                                  ENETC_TBIER_TXTIE);
314
315         enetc_unlock_mdio();
316
317         return work_done;
318 }
319
320 static int enetc_bd_ready_count(struct enetc_bdr *tx_ring, int ci)
321 {
322         int pi = enetc_rd_reg_hot(tx_ring->tcir) & ENETC_TBCIR_IDX_MASK;
323
324         return pi >= ci ? pi - ci : tx_ring->bd_count - ci + pi;
325 }
326
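/* Reconstruct a 64-bit Tx timestamp from the 32-bit value the hardware
 * wrote back into the BD: the upper 32 bits are taken from the free-running
 * SICTR0/SICTR1 counter, decremented by one when the current lower half is
 * not above the captured value, i.e. the lower counter wrapped after the
 * capture.
 */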
327 static void enetc_get_tx_tstamp(struct enetc_hw *hw, union enetc_tx_bd *txbd,
328                                 u64 *tstamp)
329 {
330         u32 lo, hi, tstamp_lo;
331
332         lo = enetc_rd_hot(hw, ENETC_SICTR0);
333         hi = enetc_rd_hot(hw, ENETC_SICTR1);
334         tstamp_lo = le32_to_cpu(txbd->wb.tstamp);
335         if (lo <= tstamp_lo)
336                 hi -= 1;
337         *tstamp = (u64)hi << 32 | tstamp_lo;
338 }
339
340 static void enetc_tstamp_tx(struct sk_buff *skb, u64 tstamp)
341 {
342         struct skb_shared_hwtstamps shhwtstamps;
343
344         if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) {
345                 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
346                 shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
347                 /* Ensure skb_mstamp_ns, which might have been populated with
348                  * the txtime, is not mistaken for a software timestamp,
349                  * because this will prevent the dispatch of our hardware
350                  * timestamp to the socket.
351                  */
352                 skb->tstamp = ktime_set(0, 0);
353                 skb_tstamp_tx(skb, &shhwtstamps);
354         }
355 }
356
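/* Tx completion: walk the BDs the hardware has consumed (up to
 * ENETC_DEFAULT_TX_WORK frames per call), harvest the hardware timestamp
 * from write-back BDs when one was requested, unmap the buffers, free the
 * skbs, update ring stats and wake the subqueue once enough BDs are free
 * again. Returns false when the work limit was hit, so the NAPI poll keeps
 * running.
 */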
357 static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
358 {
359         struct net_device *ndev = tx_ring->ndev;
360         int tx_frm_cnt = 0, tx_byte_cnt = 0;
361         struct enetc_tx_swbd *tx_swbd;
362         int i, bds_to_clean;
363         bool do_tstamp;
364         u64 tstamp = 0;
365
366         i = tx_ring->next_to_clean;
367         tx_swbd = &tx_ring->tx_swbd[i];
368
369         bds_to_clean = enetc_bd_ready_count(tx_ring, i);
370
371         do_tstamp = false;
372
373         while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) {
374                 bool is_eof = !!tx_swbd->skb;
375
376                 if (unlikely(tx_swbd->check_wb)) {
377                         struct enetc_ndev_priv *priv = netdev_priv(ndev);
378                         union enetc_tx_bd *txbd;
379
380                         txbd = ENETC_TXBD(*tx_ring, i);
381
382                         if (txbd->flags & ENETC_TXBD_FLAGS_W &&
383                             tx_swbd->do_tstamp) {
384                                 enetc_get_tx_tstamp(&priv->si->hw, txbd,
385                                                     &tstamp);
386                                 do_tstamp = true;
387                         }
388                 }
389
390                 if (likely(tx_swbd->dma))
391                         enetc_unmap_tx_buff(tx_ring, tx_swbd);
392
393                 if (is_eof) {
394                         if (unlikely(do_tstamp)) {
395                                 enetc_tstamp_tx(tx_swbd->skb, tstamp);
396                                 do_tstamp = false;
397                         }
398                         napi_consume_skb(tx_swbd->skb, napi_budget);
399                         tx_swbd->skb = NULL;
400                 }
401
402                 tx_byte_cnt += tx_swbd->len;
403
404                 bds_to_clean--;
405                 tx_swbd++;
406                 i++;
407                 if (unlikely(i == tx_ring->bd_count)) {
408                         i = 0;
409                         tx_swbd = tx_ring->tx_swbd;
410                 }
411
412                 /* BD iteration loop end */
413                 if (is_eof) {
414                         tx_frm_cnt++;
415                         /* re-arm interrupt source */
416                         enetc_wr_reg_hot(tx_ring->idr, BIT(tx_ring->index) |
417                                          BIT(16 + tx_ring->index));
418                 }
419
420                 if (unlikely(!bds_to_clean))
421                         bds_to_clean = enetc_bd_ready_count(tx_ring, i);
422         }
423
424         tx_ring->next_to_clean = i;
425         tx_ring->stats.packets += tx_frm_cnt;
426         tx_ring->stats.bytes += tx_byte_cnt;
427
428         if (unlikely(tx_frm_cnt && netif_carrier_ok(ndev) &&
429                      __netif_subqueue_stopped(ndev, tx_ring->index) &&
430                      (enetc_bd_unused(tx_ring) >= ENETC_TXBDS_MAX_NEEDED))) {
431                 netif_wake_subqueue(ndev, tx_ring->index);
432         }
433
434         return tx_frm_cnt != ENETC_DEFAULT_TX_WORK;
435 }
436
437 static bool enetc_new_page(struct enetc_bdr *rx_ring,
438                            struct enetc_rx_swbd *rx_swbd)
439 {
440         struct page *page;
441         dma_addr_t addr;
442
443         page = dev_alloc_page();
444         if (unlikely(!page))
445                 return false;
446
447         addr = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
448         if (unlikely(dma_mapping_error(rx_ring->dev, addr))) {
449                 __free_page(page);
450
451                 return false;
452         }
453
454         rx_swbd->dma = addr;
455         rx_swbd->page = page;
456         rx_swbd->page_offset = ENETC_RXB_PAD;
457
458         return true;
459 }
460
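/* Refill up to buff_cnt Rx BDs starting at next_to_use: reuse the page
 * already attached to the software BD when there is one, otherwise allocate
 * and map a fresh page. Returns the number of BDs actually refilled
 * (allocation failures stop the loop early).
 */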
461 static int enetc_refill_rx_ring(struct enetc_bdr *rx_ring, const int buff_cnt)
462 {
463         struct enetc_rx_swbd *rx_swbd;
464         union enetc_rx_bd *rxbd;
465         int i, j;
466
467         i = rx_ring->next_to_use;
468         rx_swbd = &rx_ring->rx_swbd[i];
469         rxbd = enetc_rxbd(rx_ring, i);
470
471         for (j = 0; j < buff_cnt; j++) {
472                 /* try to reuse the existing page first */
473                 if (unlikely(!rx_swbd->page)) {
474                         if (unlikely(!enetc_new_page(rx_ring, rx_swbd))) {
475                                 rx_ring->stats.rx_alloc_errs++;
476                                 break;
477                         }
478                 }
479
480                 /* update RxBD */
481                 rxbd->w.addr = cpu_to_le64(rx_swbd->dma +
482                                            rx_swbd->page_offset);
483                 /* clear 'R' as well */
484                 rxbd->r.lstatus = 0;
485
486                 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
487                 rx_swbd++;
488                 i++;
489                 if (unlikely(i == rx_ring->bd_count)) {
490                         i = 0;
491                         rx_swbd = rx_ring->rx_swbd;
492                 }
493         }
494
495         if (likely(j)) {
496                 rx_ring->next_to_alloc = i; /* keep track for page reuse */
497                 rx_ring->next_to_use = i;
498         }
499
500         return j;
501 }
502
503 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
504 static void enetc_get_rx_tstamp(struct net_device *ndev,
505                                 union enetc_rx_bd *rxbd,
506                                 struct sk_buff *skb)
507 {
508         struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
509         struct enetc_ndev_priv *priv = netdev_priv(ndev);
510         struct enetc_hw *hw = &priv->si->hw;
511         u32 lo, hi, tstamp_lo;
512         u64 tstamp;
513
514         if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TSTMP) {
515                 lo = enetc_rd_reg_hot(hw->reg + ENETC_SICTR0);
516                 hi = enetc_rd_reg_hot(hw->reg + ENETC_SICTR1);
517                 rxbd = enetc_rxbd_ext(rxbd);
518                 tstamp_lo = le32_to_cpu(rxbd->ext.tstamp);
519                 if (lo <= tstamp_lo)
520                         hi -= 1;
521
522                 tstamp = (u64)hi << 32 | tstamp_lo;
523                 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
524                 shhwtstamps->hwtstamp = ns_to_ktime(tstamp);
525         }
526 }
527 #endif
528
529 static void enetc_get_offloads(struct enetc_bdr *rx_ring,
530                                union enetc_rx_bd *rxbd, struct sk_buff *skb)
531 {
532         struct enetc_ndev_priv *priv = netdev_priv(rx_ring->ndev);
533
534         /* TODO: hashing */
535         if (rx_ring->ndev->features & NETIF_F_RXCSUM) {
536                 u16 inet_csum = le16_to_cpu(rxbd->r.inet_csum);
537
538                 skb->csum = csum_unfold((__force __sum16)~htons(inet_csum));
539                 skb->ip_summed = CHECKSUM_COMPLETE;
540         }
541
542         if (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_VLAN) {
543                 __be16 tpid = 0;
544
545                 switch (le16_to_cpu(rxbd->r.flags) & ENETC_RXBD_FLAG_TPID) {
546                 case 0:
547                         tpid = htons(ETH_P_8021Q);
548                         break;
549                 case 1:
550                         tpid = htons(ETH_P_8021AD);
551                         break;
552                 case 2:
553                         tpid = htons(enetc_port_rd(&priv->si->hw,
554                                                    ENETC_PCVLANR1));
555                         break;
556                 case 3:
557                         tpid = htons(enetc_port_rd(&priv->si->hw,
558                                                    ENETC_PCVLANR2));
559                         break;
560                 default:
561                         break;
562                 }
563
564                 __vlan_hwaccel_put_tag(skb, tpid, le16_to_cpu(rxbd->r.vlan_opt));
565         }
566
567 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
568         if (priv->active_offloads & ENETC_F_RX_TSTAMP)
569                 enetc_get_rx_tstamp(rx_ring->ndev, rxbd, skb);
570 #endif
571 }
572
573 static void enetc_process_skb(struct enetc_bdr *rx_ring,
574                               struct sk_buff *skb)
575 {
576         skb_record_rx_queue(skb, rx_ring->index);
577         skb->protocol = eth_type_trans(skb, rx_ring->ndev);
578 }
579
580 static bool enetc_page_reusable(struct page *page)
581 {
582         return (!page_is_pfmemalloc(page) && page_ref_count(page) == 1);
583 }
584
585 static void enetc_reuse_page(struct enetc_bdr *rx_ring,
586                              struct enetc_rx_swbd *old)
587 {
588         struct enetc_rx_swbd *new;
589
590         new = &rx_ring->rx_swbd[rx_ring->next_to_alloc];
591
592         /* next buf that may reuse a page */
593         enetc_bdr_idx_inc(rx_ring, &rx_ring->next_to_alloc);
594
595         /* copy page reference */
596         *new = *old;
597 }
598
599 static struct enetc_rx_swbd *enetc_get_rx_buff(struct enetc_bdr *rx_ring,
600                                                int i, u16 size)
601 {
602         struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
603
604         dma_sync_single_range_for_cpu(rx_ring->dev, rx_swbd->dma,
605                                       rx_swbd->page_offset,
606                                       size, DMA_FROM_DEVICE);
607         return rx_swbd;
608 }
609
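/* Recycle an Rx buffer: when the page is still exclusively owned by the
 * driver, flip page_offset to the other half of the page, take an extra
 * page reference and park the buffer at next_to_alloc so the refill path
 * can pick it up again; otherwise just unmap the page and let the skb's
 * reference release it.
 */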
610 static void enetc_put_rx_buff(struct enetc_bdr *rx_ring,
611                               struct enetc_rx_swbd *rx_swbd)
612 {
613         if (likely(enetc_page_reusable(rx_swbd->page))) {
614                 rx_swbd->page_offset ^= ENETC_RXB_TRUESIZE;
615                 page_ref_inc(rx_swbd->page);
616
617                 enetc_reuse_page(rx_ring, rx_swbd);
618
619                 /* sync for use by the device */
620                 dma_sync_single_range_for_device(rx_ring->dev, rx_swbd->dma,
621                                                  rx_swbd->page_offset,
622                                                  ENETC_RXB_DMA_SIZE,
623                                                  DMA_FROM_DEVICE);
624         } else {
625                 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
626                                PAGE_SIZE, DMA_FROM_DEVICE);
627         }
628
629         rx_swbd->page = NULL;
630 }
631
632 static struct sk_buff *enetc_map_rx_buff_to_skb(struct enetc_bdr *rx_ring,
633                                                 int i, u16 size)
634 {
635         struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
636         struct sk_buff *skb;
637         void *ba;
638
639         ba = page_address(rx_swbd->page) + rx_swbd->page_offset;
640         skb = build_skb(ba - ENETC_RXB_PAD, ENETC_RXB_TRUESIZE);
641         if (unlikely(!skb)) {
642                 rx_ring->stats.rx_alloc_errs++;
643                 return NULL;
644         }
645
646         skb_reserve(skb, ENETC_RXB_PAD);
647         __skb_put(skb, size);
648
649         enetc_put_rx_buff(rx_ring, rx_swbd);
650
651         return skb;
652 }
653
654 static void enetc_add_rx_buff_to_skb(struct enetc_bdr *rx_ring, int i,
655                                      u16 size, struct sk_buff *skb)
656 {
657         struct enetc_rx_swbd *rx_swbd = enetc_get_rx_buff(rx_ring, i, size);
658
659         skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_swbd->page,
660                         rx_swbd->page_offset, size, ENETC_RXB_TRUESIZE);
661
662         enetc_put_rx_buff(rx_ring, rx_swbd);
663 }
664
665 #define ENETC_RXBD_BUNDLE 16 /* # of BDs to update at once */
666
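/* Rx processing: for each received frame build an skb from the first
 * buffer, append any additional buffers as page fragments until the final
 * ('F') BD, pull offloads (checksum, VLAN, timestamp) from the BD, and hand
 * the skb to napi_gro_receive(). Frames with an error status are dropped.
 * Free BDs are given back to the hardware in bundles of ENETC_RXBD_BUNDLE.
 */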
667 static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
668                                struct napi_struct *napi, int work_limit)
669 {
670         int rx_frm_cnt = 0, rx_byte_cnt = 0;
671         int cleaned_cnt, i;
672
673         cleaned_cnt = enetc_bd_unused(rx_ring);
674         /* next descriptor to process */
675         i = rx_ring->next_to_clean;
676
677         while (likely(rx_frm_cnt < work_limit)) {
678                 union enetc_rx_bd *rxbd;
679                 struct sk_buff *skb;
680                 u32 bd_status;
681                 u16 size;
682
683                 if (cleaned_cnt >= ENETC_RXBD_BUNDLE) {
684                         int count = enetc_refill_rx_ring(rx_ring, cleaned_cnt);
685
686                         /* update ENETC's consumer index */
687                         enetc_wr_reg_hot(rx_ring->rcir, rx_ring->next_to_use);
688                         cleaned_cnt -= count;
689                 }
690
691                 rxbd = enetc_rxbd(rx_ring, i);
692                 bd_status = le32_to_cpu(rxbd->r.lstatus);
693                 if (!bd_status)
694                         break;
695
696                 enetc_wr_reg_hot(rx_ring->idr, BIT(rx_ring->index));
697                 dma_rmb(); /* for reading other rxbd fields */
698                 size = le16_to_cpu(rxbd->r.buf_len);
699                 skb = enetc_map_rx_buff_to_skb(rx_ring, i, size);
700                 if (!skb)
701                         break;
702
703                 enetc_get_offloads(rx_ring, rxbd, skb);
704
705                 cleaned_cnt++;
706
707                 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
708                 if (unlikely(++i == rx_ring->bd_count))
709                         i = 0;
710
711                 if (unlikely(bd_status &
712                              ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK))) {
713                         dev_kfree_skb(skb);
714                         while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
715                                 dma_rmb();
716                                 bd_status = le32_to_cpu(rxbd->r.lstatus);
717
718                                 rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
719                                 if (unlikely(++i == rx_ring->bd_count))
720                                         i = 0;
721                         }
722
723                         rx_ring->ndev->stats.rx_dropped++;
724                         rx_ring->ndev->stats.rx_errors++;
725
726                         break;
727                 }
728
729                 /* not last BD in frame? */
730                 while (!(bd_status & ENETC_RXBD_LSTATUS_F)) {
731                         bd_status = le32_to_cpu(rxbd->r.lstatus);
732                         size = ENETC_RXB_DMA_SIZE;
733
734                         if (bd_status & ENETC_RXBD_LSTATUS_F) {
735                                 dma_rmb();
736                                 size = le16_to_cpu(rxbd->r.buf_len);
737                         }
738
739                         enetc_add_rx_buff_to_skb(rx_ring, i, size, skb);
740
741                         cleaned_cnt++;
742
743                         rxbd = enetc_rxbd_next(rx_ring, rxbd, i);
744                         if (unlikely(++i == rx_ring->bd_count))
745                                 i = 0;
746                 }
747
748                 rx_byte_cnt += skb->len;
749
750                 enetc_process_skb(rx_ring, skb);
751
752                 napi_gro_receive(napi, skb);
753
754                 rx_frm_cnt++;
755         }
756
757         rx_ring->next_to_clean = i;
758
759         rx_ring->stats.packets += rx_frm_cnt;
760         rx_ring->stats.bytes += rx_byte_cnt;
761
762         return rx_frm_cnt;
763 }
764
765 /* Probing and Init */
766 #define ENETC_MAX_RFS_SIZE 64
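/* Read the station interface (SI) capability registers to learn how many
 * Rx/Tx rings, flow steering entries and RSS table entries this SI owns,
 * and which hardware features (Qbv, PSFP) it supports.
 */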
767 void enetc_get_si_caps(struct enetc_si *si)
768 {
769         struct enetc_hw *hw = &si->hw;
770         u32 val;
771
772         /* find out how many of the various resources we have to work with */
773         val = enetc_rd(hw, ENETC_SICAPR0);
774         si->num_rx_rings = (val >> 16) & 0xff;
775         si->num_tx_rings = val & 0xff;
776
777         val = enetc_rd(hw, ENETC_SIRFSCAPR);
778         si->num_fs_entries = ENETC_SIRFSCAPR_GET_NUM_RFS(val);
779         si->num_fs_entries = min(si->num_fs_entries, ENETC_MAX_RFS_SIZE);
780
781         si->num_rss = 0;
782         val = enetc_rd(hw, ENETC_SIPCAPR0);
783         if (val & ENETC_SIPCAPR0_RSS) {
784                 u32 rss;
785
786                 rss = enetc_rd(hw, ENETC_SIRSSCAPR);
787                 si->num_rss = ENETC_SIRSSCAPR_GET_NUM_RSS(rss);
788         }
789
790         if (val & ENETC_SIPCAPR0_QBV)
791                 si->hw_features |= ENETC_SI_F_QBV;
792
793         if (val & ENETC_SIPCAPR0_PSFP)
794                 si->hw_features |= ENETC_SI_F_PSFP;
795 }
796
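/* Allocate the DMA-coherent memory backing a BD ring. The hardware requires
 * the ring base to be 128-byte aligned, so an unaligned allocation is
 * treated as an error rather than worked around.
 */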
797 static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size)
798 {
799         r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size,
800                                         &r->bd_dma_base, GFP_KERNEL);
801         if (!r->bd_base)
802                 return -ENOMEM;
803
804         /* h/w requires 128B alignment */
805         if (!IS_ALIGNED(r->bd_dma_base, 128)) {
806                 dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base,
807                                   r->bd_dma_base);
808                 return -EINVAL;
809         }
810
811         return 0;
812 }
813
814 static int enetc_alloc_txbdr(struct enetc_bdr *txr)
815 {
816         int err;
817
818         txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd));
819         if (!txr->tx_swbd)
820                 return -ENOMEM;
821
822         err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
823         if (err) {
824                 vfree(txr->tx_swbd);
825                 return err;
826         }
827
828         txr->next_to_clean = 0;
829         txr->next_to_use = 0;
830
831         return 0;
832 }
833
834 static void enetc_free_txbdr(struct enetc_bdr *txr)
835 {
836         int size, i;
837
838         for (i = 0; i < txr->bd_count; i++)
839                 enetc_free_tx_skb(txr, &txr->tx_swbd[i]);
840
841         size = txr->bd_count * sizeof(union enetc_tx_bd);
842
843         dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
844         txr->bd_base = NULL;
845
846         vfree(txr->tx_swbd);
847         txr->tx_swbd = NULL;
848 }
849
850 static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv)
851 {
852         int i, err;
853
854         for (i = 0; i < priv->num_tx_rings; i++) {
855                 err = enetc_alloc_txbdr(priv->tx_ring[i]);
856
857                 if (err)
858                         goto fail;
859         }
860
861         return 0;
862
863 fail:
864         while (i-- > 0)
865                 enetc_free_txbdr(priv->tx_ring[i]);
866
867         return err;
868 }
869
870 static void enetc_free_tx_resources(struct enetc_ndev_priv *priv)
871 {
872         int i;
873
874         for (i = 0; i < priv->num_tx_rings; i++)
875                 enetc_free_txbdr(priv->tx_ring[i]);
876 }
877
878 static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended)
879 {
880         size_t size = sizeof(union enetc_rx_bd);
881         int err;
882
883         rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd));
884         if (!rxr->rx_swbd)
885                 return -ENOMEM;
886
887         if (extended)
888                 size *= 2;
889
890         err = enetc_dma_alloc_bdr(rxr, size);
891         if (err) {
892                 vfree(rxr->rx_swbd);
893                 return err;
894         }
895
896         rxr->next_to_clean = 0;
897         rxr->next_to_use = 0;
898         rxr->next_to_alloc = 0;
899         rxr->ext_en = extended;
900
901         return 0;
902 }
903
904 static void enetc_free_rxbdr(struct enetc_bdr *rxr)
905 {
906         int size;
907
908         size = rxr->bd_count * sizeof(union enetc_rx_bd);
909
910         dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base);
911         rxr->bd_base = NULL;
912
913         vfree(rxr->rx_swbd);
914         rxr->rx_swbd = NULL;
915 }
916
917 static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv)
918 {
919         bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP);
920         int i, err;
921
922         for (i = 0; i < priv->num_rx_rings; i++) {
923                 err = enetc_alloc_rxbdr(priv->rx_ring[i], extended);
924
925                 if (err)
926                         goto fail;
927         }
928
929         return 0;
930
931 fail:
932         while (i-- > 0)
933                 enetc_free_rxbdr(priv->rx_ring[i]);
934
935         return err;
936 }
937
938 static void enetc_free_rx_resources(struct enetc_ndev_priv *priv)
939 {
940         int i;
941
942         for (i = 0; i < priv->num_rx_rings; i++)
943                 enetc_free_rxbdr(priv->rx_ring[i]);
944 }
945
946 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring)
947 {
948         int i;
949
950         if (!tx_ring->tx_swbd)
951                 return;
952
953         for (i = 0; i < tx_ring->bd_count; i++) {
954                 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
955
956                 enetc_free_tx_skb(tx_ring, tx_swbd);
957         }
958
959         tx_ring->next_to_clean = 0;
960         tx_ring->next_to_use = 0;
961 }
962
963 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring)
964 {
965         int i;
966
967         if (!rx_ring->rx_swbd)
968                 return;
969
970         for (i = 0; i < rx_ring->bd_count; i++) {
971                 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i];
972
973                 if (!rx_swbd->page)
974                         continue;
975
976                 dma_unmap_page(rx_ring->dev, rx_swbd->dma,
977                                PAGE_SIZE, DMA_FROM_DEVICE);
978                 __free_page(rx_swbd->page);
979                 rx_swbd->page = NULL;
980         }
981
982         rx_ring->next_to_clean = 0;
983         rx_ring->next_to_use = 0;
984         rx_ring->next_to_alloc = 0;
985 }
986
987 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv)
988 {
989         int i;
990
991         for (i = 0; i < priv->num_rx_rings; i++)
992                 enetc_free_rx_ring(priv->rx_ring[i]);
993
994         for (i = 0; i < priv->num_tx_rings; i++)
995                 enetc_free_tx_ring(priv->tx_ring[i]);
996 }
997
998 int enetc_alloc_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
999 {
1000         int size = cbdr->bd_count * sizeof(struct enetc_cbd);
1001
1002         cbdr->bd_base = dma_alloc_coherent(dev, size, &cbdr->bd_dma_base,
1003                                            GFP_KERNEL);
1004         if (!cbdr->bd_base)
1005                 return -ENOMEM;
1006
1007         /* h/w requires 128B alignment */
1008         if (!IS_ALIGNED(cbdr->bd_dma_base, 128)) {
1009                 dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
1010                 return -EINVAL;
1011         }
1012
1013         cbdr->next_to_clean = 0;
1014         cbdr->next_to_use = 0;
1015
1016         return 0;
1017 }
1018
1019 void enetc_free_cbdr(struct device *dev, struct enetc_cbdr *cbdr)
1020 {
1021         int size = cbdr->bd_count * sizeof(struct enetc_cbd);
1022
1023         dma_free_coherent(dev, size, cbdr->bd_base, cbdr->bd_dma_base);
1024         cbdr->bd_base = NULL;
1025 }
1026
1027 void enetc_setup_cbdr(struct enetc_hw *hw, struct enetc_cbdr *cbdr)
1028 {
1029         /* set CBDR cache attributes */
1030         enetc_wr(hw, ENETC_SICAR2,
1031                  ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1032
1033         enetc_wr(hw, ENETC_SICBDRBAR0, lower_32_bits(cbdr->bd_dma_base));
1034         enetc_wr(hw, ENETC_SICBDRBAR1, upper_32_bits(cbdr->bd_dma_base));
1035         enetc_wr(hw, ENETC_SICBDRLENR, ENETC_RTBLENR_LEN(cbdr->bd_count));
1036
1037         enetc_wr(hw, ENETC_SICBDRPIR, 0);
1038         enetc_wr(hw, ENETC_SICBDRCIR, 0);
1039
1040         /* enable ring */
1041         enetc_wr(hw, ENETC_SICBDRMR, BIT(31));
1042
1043         cbdr->pir = hw->reg + ENETC_SICBDRPIR;
1044         cbdr->cir = hw->reg + ENETC_SICBDRCIR;
1045 }
1046
1047 void enetc_clear_cbdr(struct enetc_hw *hw)
1048 {
1049         enetc_wr(hw, ENETC_SICBDRMR, 0);
1050 }
1051
1052 static int enetc_setup_default_rss_table(struct enetc_si *si, int num_groups)
1053 {
1054         int *rss_table;
1055         int i;
1056
1057         rss_table = kmalloc_array(si->num_rss, sizeof(*rss_table), GFP_KERNEL);
1058         if (!rss_table)
1059                 return -ENOMEM;
1060
1061         /* Set up RSS table defaults */
1062         for (i = 0; i < si->num_rss; i++)
1063                 rss_table[i] = i % num_groups;
1064
1065         enetc_set_rss_table(si, rss_table, si->num_rss);
1066
1067         kfree(rss_table);
1068
1069         return 0;
1070 }
1071
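/* Bring up the station interface: program the SI cache attribute registers,
 * enable the SI, and, when RSS is available, install a default indirection
 * table that spreads flows round-robin across the active Rx rings.
 */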
1072 int enetc_configure_si(struct enetc_ndev_priv *priv)
1073 {
1074         struct enetc_si *si = priv->si;
1075         struct enetc_hw *hw = &si->hw;
1076         int err;
1077
1078         /* set SI cache attributes */
1079         enetc_wr(hw, ENETC_SICAR0,
1080                  ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT);
1081         enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI);
1082         /* enable SI */
1083         enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN);
1084
1085         if (si->num_rss) {
1086                 err = enetc_setup_default_rss_table(si, priv->num_rx_rings);
1087                 if (err)
1088                         return err;
1089         }
1090
1091         return 0;
1092 }
1093
1094 void enetc_init_si_rings_params(struct enetc_ndev_priv *priv)
1095 {
1096         struct enetc_si *si = priv->si;
1097         int cpus = num_online_cpus();
1098
1099         priv->tx_bd_count = ENETC_TX_RING_DEFAULT_SIZE;
1100         priv->rx_bd_count = ENETC_RX_RING_DEFAULT_SIZE;
1101
1102         /* Enable all available TX rings in order to configure as many
1103          * priorities as possible, when needed.
1104          * TODO: Make # of TX rings run-time configurable
1105          */
1106         priv->num_rx_rings = min_t(int, cpus, si->num_rx_rings);
1107         priv->num_tx_rings = si->num_tx_rings;
1108         priv->bdr_int_num = cpus;
1109         priv->ic_mode = ENETC_IC_RX_ADAPTIVE | ENETC_IC_TX_MANUAL;
1110         priv->tx_ictt = ENETC_TXIC_TIMETHR;
1111
1112         /* SI specific */
1113         si->cbd_ring.bd_count = ENETC_CBDR_DEFAULT_SIZE;
1114 }
1115
1116 int enetc_alloc_si_resources(struct enetc_ndev_priv *priv)
1117 {
1118         struct enetc_si *si = priv->si;
1119         int err;
1120
1121         err = enetc_alloc_cbdr(priv->dev, &si->cbd_ring);
1122         if (err)
1123                 return err;
1124
1125         enetc_setup_cbdr(&si->hw, &si->cbd_ring);
1126
1127         priv->cls_rules = kcalloc(si->num_fs_entries, sizeof(*priv->cls_rules),
1128                                   GFP_KERNEL);
1129         if (!priv->cls_rules) {
1130                 err = -ENOMEM;
1131                 goto err_alloc_cls;
1132         }
1133
1134         return 0;
1135
1136 err_alloc_cls:
1137         enetc_clear_cbdr(&si->hw);
1138         enetc_free_cbdr(priv->dev, &si->cbd_ring);
1139
1140         return err;
1141 }
1142
1143 void enetc_free_si_resources(struct enetc_ndev_priv *priv)
1144 {
1145         struct enetc_si *si = priv->si;
1146
1147         enetc_clear_cbdr(&si->hw);
1148         enetc_free_cbdr(priv->dev, &si->cbd_ring);
1149
1150         kfree(priv->cls_rules);
1151 }
1152
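/* Program one Tx BD ring into the hardware: base address, length (a
 * multiple of 64 BDs), interrupt coalescing threshold, optional VLAN
 * insertion and the enable bit. The producer/consumer index registers
 * cannot be reset, so the software indexes are synced to whatever the
 * hardware currently reports.
 */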
1153 static void enetc_setup_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1154 {
1155         int idx = tx_ring->index;
1156         u32 tbmr;
1157
1158         enetc_txbdr_wr(hw, idx, ENETC_TBBAR0,
1159                        lower_32_bits(tx_ring->bd_dma_base));
1160
1161         enetc_txbdr_wr(hw, idx, ENETC_TBBAR1,
1162                        upper_32_bits(tx_ring->bd_dma_base));
1163
1164         WARN_ON(!IS_ALIGNED(tx_ring->bd_count, 64)); /* multiple of 64 */
1165         enetc_txbdr_wr(hw, idx, ENETC_TBLENR,
1166                        ENETC_RTBLENR_LEN(tx_ring->bd_count));
1167
1168         /* clearing PI/CI registers for Tx not supported, adjust sw indexes */
1169         tx_ring->next_to_use = enetc_txbdr_rd(hw, idx, ENETC_TBPIR);
1170         tx_ring->next_to_clean = enetc_txbdr_rd(hw, idx, ENETC_TBCIR);
1171
1172         /* enable Tx ints by setting pkt thr to 1 */
1173         enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1);
1174
1175         tbmr = ENETC_TBMR_EN;
1176         if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
1177                 tbmr |= ENETC_TBMR_VIH;
1178
1179         /* enable ring */
1180         enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr);
1181
1182         tx_ring->tpir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBPIR);
1183         tx_ring->tcir = hw->reg + ENETC_BDR(TX, idx, ENETC_TBCIR);
1184         tx_ring->idr = hw->reg + ENETC_SITXIDR;
1185 }
1186
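/* Program one Rx BD ring: base address, length, buffer size, interrupt
 * coalescing threshold, extended BDs when Rx timestamping is enabled, and
 * optional VLAN extraction. The ring is fully refilled with buffers before
 * the enable bit is set.
 */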
1187 static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1188 {
1189         int idx = rx_ring->index;
1190         u32 rbmr;
1191
1192         enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0,
1193                        lower_32_bits(rx_ring->bd_dma_base));
1194
1195         enetc_rxbdr_wr(hw, idx, ENETC_RBBAR1,
1196                        upper_32_bits(rx_ring->bd_dma_base));
1197
1198         WARN_ON(!IS_ALIGNED(rx_ring->bd_count, 64)); /* multiple of 64 */
1199         enetc_rxbdr_wr(hw, idx, ENETC_RBLENR,
1200                        ENETC_RTBLENR_LEN(rx_ring->bd_count));
1201
1202         enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE);
1203
1204         enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0);
1205
1206         /* enable Rx ints by setting pkt thr to 1 */
1207         enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1);
1208
1209         rbmr = ENETC_RBMR_EN;
1210
1211         if (rx_ring->ext_en)
1212                 rbmr |= ENETC_RBMR_BDS;
1213
1214         if (rx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
1215                 rbmr |= ENETC_RBMR_VTE;
1216
1217         rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR);
1218         rx_ring->idr = hw->reg + ENETC_SIRXIDR;
1219
1220         enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring));
1221         /* update ENETC's consumer index */
1222         enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, rx_ring->next_to_use);
1223
1224         /* enable ring */
1225         enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr);
1226 }
1227
1228 static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
1229 {
1230         int i;
1231
1232         for (i = 0; i < priv->num_tx_rings; i++)
1233                 enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
1234
1235         for (i = 0; i < priv->num_rx_rings; i++)
1236                 enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1237 }
1238
1239 static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
1240 {
1241         int idx = rx_ring->index;
1242
1243         /* disable EN bit on ring */
1244         enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0);
1245 }
1246
1247 static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
1248 {
1249         int delay = 8, timeout = 100;
1250         int idx = tx_ring->index;
1251
1252         /* disable EN bit on ring */
1253         enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0);
1254
1255         /* wait for busy to clear */
1256         while (delay < timeout &&
1257                enetc_txbdr_rd(hw, idx, ENETC_TBSR) & ENETC_TBSR_BUSY) {
1258                 msleep(delay);
1259                 delay *= 2;
1260         }
1261
1262         if (delay >= timeout)
1263                 netdev_warn(tx_ring->ndev, "timeout for tx ring #%d clear\n",
1264                             idx);
1265 }
1266
1267 static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
1268 {
1269         int i;
1270
1271         for (i = 0; i < priv->num_tx_rings; i++)
1272                 enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
1273
1274         for (i = 0; i < priv->num_rx_rings; i++)
1275                 enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
1276
1277         udelay(1);
1278 }
1279
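/* Request one MSI-X interrupt per BD-ring vector, route the Rx ring and
 * every Tx ring owned by the vector to that MSI-X entry, and spread the
 * vectors across the online CPUs via IRQ affinity hints. On failure all
 * previously requested IRQs are released.
 */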
1280 static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
1281 {
1282         struct pci_dev *pdev = priv->si->pdev;
1283         cpumask_t cpu_mask;
1284         int i, j, err;
1285
1286         for (i = 0; i < priv->bdr_int_num; i++) {
1287                 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1288                 struct enetc_int_vector *v = priv->int_vector[i];
1289                 int entry = ENETC_BDR_INT_BASE_IDX + i;
1290                 struct enetc_hw *hw = &priv->si->hw;
1291
1292                 snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
1293                          priv->ndev->name, i);
1294                 err = request_irq(irq, enetc_msix, 0, v->name, v);
1295                 if (err) {
1296                         dev_err(priv->dev, "request_irq() failed!\n");
1297                         goto irq_err;
1298                 }
1299                 disable_irq(irq);
1300
1301                 v->tbier_base = hw->reg + ENETC_BDR(TX, 0, ENETC_TBIER);
1302                 v->rbier = hw->reg + ENETC_BDR(RX, i, ENETC_RBIER);
1303                 v->ricr1 = hw->reg + ENETC_BDR(RX, i, ENETC_RBICR1);
1304
1305                 enetc_wr(hw, ENETC_SIMSIRRV(i), entry);
1306
1307                 for (j = 0; j < v->count_tx_rings; j++) {
1308                         int idx = v->tx_ring[j].index;
1309
1310                         enetc_wr(hw, ENETC_SIMSITRV(idx), entry);
1311                 }
1312                 cpumask_clear(&cpu_mask);
1313                 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
1314                 irq_set_affinity_hint(irq, &cpu_mask);
1315         }
1316
1317         return 0;
1318
1319 irq_err:
1320         while (i--) {
1321                 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1322
1323                 irq_set_affinity_hint(irq, NULL);
1324                 free_irq(irq, priv->int_vector[i]);
1325         }
1326
1327         return err;
1328 }
1329
1330 static void enetc_free_irqs(struct enetc_ndev_priv *priv)
1331 {
1332         struct pci_dev *pdev = priv->si->pdev;
1333         int i;
1334
1335         for (i = 0; i < priv->bdr_int_num; i++) {
1336                 int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
1337
1338                 irq_set_affinity_hint(irq, NULL);
1339                 free_irq(irq, priv->int_vector[i]);
1340         }
1341 }
1342
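/* Configure interrupt coalescing and enable the BD ring interrupts: with
 * manual/adaptive moderation the packet threshold and an initial time
 * threshold are programmed, otherwise interrupts fire on every packet
 * (threshold of 1).
 */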
1343 static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
1344 {
1345         struct enetc_hw *hw = &priv->si->hw;
1346         u32 icpt, ictt;
1347         int i;
1348
1349         /* enable Tx & Rx event indication */
1350         if (priv->ic_mode &
1351             (ENETC_IC_RX_MANUAL | ENETC_IC_RX_ADAPTIVE)) {
1352                 icpt = ENETC_RBICR0_SET_ICPT(ENETC_RXIC_PKTTHR);
1353                 /* init to non-0 minimum, will be adjusted later */
1354                 ictt = 0x1;
1355         } else {
1356                 icpt = 0x1; /* enable Rx ints by setting pkt thr to 1 */
1357                 ictt = 0;
1358         }
1359
1360         for (i = 0; i < priv->num_rx_rings; i++) {
1361                 enetc_rxbdr_wr(hw, i, ENETC_RBICR1, ictt);
1362                 enetc_rxbdr_wr(hw, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | icpt);
1363                 enetc_rxbdr_wr(hw, i, ENETC_RBIER, ENETC_RBIER_RXTIE);
1364         }
1365
1366         if (priv->ic_mode & ENETC_IC_TX_MANUAL)
1367                 icpt = ENETC_TBICR0_SET_ICPT(ENETC_TXIC_PKTTHR);
1368         else
1369                 icpt = 0x1; /* enable Tx ints by setting pkt thr to 1 */
1370
1371         for (i = 0; i < priv->num_tx_rings; i++) {
1372                 enetc_txbdr_wr(hw, i, ENETC_TBICR1, priv->tx_ictt);
1373                 enetc_txbdr_wr(hw, i, ENETC_TBICR0, ENETC_TBICR0_ICEN | icpt);
1374                 enetc_txbdr_wr(hw, i, ENETC_TBIER, ENETC_TBIER_TXTIE);
1375         }
1376 }
1377
1378 static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
1379 {
1380         int i;
1381
1382         for (i = 0; i < priv->num_tx_rings; i++)
1383                 enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
1384
1385         for (i = 0; i < priv->num_rx_rings; i++)
1386                 enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
1387 }
1388
1389 static int enetc_phylink_connect(struct net_device *ndev)
1390 {
1391         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1392         struct ethtool_eee edata;
1393         int err;
1394
1395         if (!priv->phylink)
1396                 return 0; /* phy-less mode */
1397
1398         err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0);
1399         if (err) {
1400                 dev_err(&ndev->dev, "could not attach to PHY\n");
1401                 return err;
1402         }
1403
1404         /* disable EEE autoneg, until the ENETC driver supports it */
1405         memset(&edata, 0, sizeof(struct ethtool_eee));
1406         phylink_ethtool_set_eee(priv->phylink, &edata);
1407
1408         return 0;
1409 }
1410
1411 void enetc_start(struct net_device *ndev)
1412 {
1413         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1414         int i;
1415
1416         enetc_setup_interrupts(priv);
1417
1418         for (i = 0; i < priv->bdr_int_num; i++) {
1419                 int irq = pci_irq_vector(priv->si->pdev,
1420                                          ENETC_BDR_INT_BASE_IDX + i);
1421
1422                 napi_enable(&priv->int_vector[i]->napi);
1423                 enable_irq(irq);
1424         }
1425
1426         if (priv->phylink)
1427                 phylink_start(priv->phylink);
1428         else
1429                 netif_carrier_on(ndev);
1430
1431         netif_tx_start_all_queues(ndev);
1432 }
1433
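/* ndo_open: request IRQs, connect the PHY via phylink, allocate Tx/Rx ring
 * resources, set the real queue counts, program the BD rings and start the
 * interface. Each step is unwound in reverse order on failure.
 */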
1434 int enetc_open(struct net_device *ndev)
1435 {
1436         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1437         int err;
1438
1439         err = enetc_setup_irqs(priv);
1440         if (err)
1441                 return err;
1442
1443         err = enetc_phylink_connect(ndev);
1444         if (err)
1445                 goto err_phy_connect;
1446
1447         err = enetc_alloc_tx_resources(priv);
1448         if (err)
1449                 goto err_alloc_tx;
1450
1451         err = enetc_alloc_rx_resources(priv);
1452         if (err)
1453                 goto err_alloc_rx;
1454
1455         err = netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1456         if (err)
1457                 goto err_set_queues;
1458
1459         err = netif_set_real_num_rx_queues(ndev, priv->num_rx_rings);
1460         if (err)
1461                 goto err_set_queues;
1462
1463         enetc_setup_bdrs(priv);
1464         enetc_start(ndev);
1465
1466         return 0;
1467
1468 err_set_queues:
1469         enetc_free_rx_resources(priv);
1470 err_alloc_rx:
1471         enetc_free_tx_resources(priv);
1472 err_alloc_tx:
1473         if (priv->phylink)
1474                 phylink_disconnect_phy(priv->phylink);
1475 err_phy_connect:
1476         enetc_free_irqs(priv);
1477
1478         return err;
1479 }
1480
1481 void enetc_stop(struct net_device *ndev)
1482 {
1483         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1484         int i;
1485
1486         netif_tx_stop_all_queues(ndev);
1487
1488         for (i = 0; i < priv->bdr_int_num; i++) {
1489                 int irq = pci_irq_vector(priv->si->pdev,
1490                                          ENETC_BDR_INT_BASE_IDX + i);
1491
1492                 disable_irq(irq);
1493                 napi_synchronize(&priv->int_vector[i]->napi);
1494                 napi_disable(&priv->int_vector[i]->napi);
1495         }
1496
1497         if (priv->phylink)
1498                 phylink_stop(priv->phylink);
1499         else
1500                 netif_carrier_off(ndev);
1501
1502         enetc_clear_interrupts(priv);
1503 }
1504
1505 int enetc_close(struct net_device *ndev)
1506 {
1507         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1508
1509         enetc_stop(ndev);
1510         enetc_clear_bdrs(priv);
1511
1512         if (priv->phylink)
1513                 phylink_disconnect_phy(priv->phylink);
1514         enetc_free_rxtx_rings(priv);
1515         enetc_free_rx_resources(priv);
1516         enetc_free_tx_resources(priv);
1517         enetc_free_irqs(priv);
1518
1519         return 0;
1520 }
1521
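/* mqprio offload: map traffic class i to Tx BD ring i and assign the rings
 * increasing hardware priorities. A TC count of zero tears the mapping down
 * and resets all ring priorities to 0.
 */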
1522 static int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
1523 {
1524         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1525         struct tc_mqprio_qopt *mqprio = type_data;
1526         struct enetc_bdr *tx_ring;
1527         u8 num_tc;
1528         int i;
1529
1530         mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1531         num_tc = mqprio->num_tc;
1532
1533         if (!num_tc) {
1534                 netdev_reset_tc(ndev);
1535                 netif_set_real_num_tx_queues(ndev, priv->num_tx_rings);
1536
1537                 /* Reset all ring priorities to 0 */
1538                 for (i = 0; i < priv->num_tx_rings; i++) {
1539                         tx_ring = priv->tx_ring[i];
1540                         enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
1541                 }
1542
1543                 return 0;
1544         }
1545
1546         /* Check if we have enough BD rings available to accommodate all TCs */
1547         if (num_tc > priv->num_tx_rings) {
1548                 netdev_err(ndev, "Max %d traffic classes supported\n",
1549                            priv->num_tx_rings);
1550                 return -EINVAL;
1551         }
1552
1553         /* For the moment, we use only one BD ring per TC.
1554          *
1555          * Configure num_tc BD rings with increasing priorities.
1556          */
1557         for (i = 0; i < num_tc; i++) {
1558                 tx_ring = priv->tx_ring[i];
1559                 enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
1560         }
1561
1562         /* Reset the number of netdev queues based on the TC count */
1563         netif_set_real_num_tx_queues(ndev, num_tc);
1564
1565         netdev_set_num_tc(ndev, num_tc);
1566
1567         /* Each TC is associated with one netdev queue */
1568         for (i = 0; i < num_tc; i++)
1569                 netdev_set_tc_queue(ndev, i, 1, i);
1570
1571         return 0;
1572 }
1573
1574 int enetc_setup_tc(struct net_device *ndev, enum tc_setup_type type,
1575                    void *type_data)
1576 {
1577         switch (type) {
1578         case TC_SETUP_QDISC_MQPRIO:
1579                 return enetc_setup_tc_mqprio(ndev, type_data);
1580         case TC_SETUP_QDISC_TAPRIO:
1581                 return enetc_setup_tc_taprio(ndev, type_data);
1582         case TC_SETUP_QDISC_CBS:
1583                 return enetc_setup_tc_cbs(ndev, type_data);
1584         case TC_SETUP_QDISC_ETF:
1585                 return enetc_setup_tc_txtime(ndev, type_data);
1586         case TC_SETUP_BLOCK:
1587                 return enetc_setup_tc_psfp(ndev, type_data);
1588         default:
1589                 return -EOPNOTSUPP;
1590         }
1591 }
1592
1593 struct net_device_stats *enetc_get_stats(struct net_device *ndev)
1594 {
1595         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1596         struct net_device_stats *stats = &ndev->stats;
1597         unsigned long packets = 0, bytes = 0;
1598         int i;
1599
1600         for (i = 0; i < priv->num_rx_rings; i++) {
1601                 packets += priv->rx_ring[i]->stats.packets;
1602                 bytes   += priv->rx_ring[i]->stats.bytes;
1603         }
1604
1605         stats->rx_packets = packets;
1606         stats->rx_bytes = bytes;
1607         bytes = 0;
1608         packets = 0;
1609
1610         for (i = 0; i < priv->num_tx_rings; i++) {
1611                 packets += priv->tx_ring[i]->stats.packets;
1612                 bytes   += priv->tx_ring[i]->stats.bytes;
1613         }
1614
1615         stats->tx_packets = packets;
1616         stats->tx_bytes = bytes;
1617
1618         return stats;
1619 }
1620
1621 static int enetc_set_rss(struct net_device *ndev, int en)
1622 {
1623         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1624         struct enetc_hw *hw = &priv->si->hw;
1625         u32 reg;
1626
1627         enetc_wr(hw, ENETC_SIRBGCR, priv->num_rx_rings);
1628
1629         reg = enetc_rd(hw, ENETC_SIMR);
1630         reg &= ~ENETC_SIMR_RSSE;
1631         reg |= (en) ? ENETC_SIMR_RSSE : 0;
1632         enetc_wr(hw, ENETC_SIMR, reg);
1633
1634         return 0;
1635 }
1636
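     /* Enable/disable PSFP (IEEE 802.1Qci per-stream filtering and
      * policing) offload and keep the ENETC_F_QCI active_offloads flag
      * in sync with the hardware state.
      */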
1637 static int enetc_set_psfp(struct net_device *ndev, int en)
1638 {
1639         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1640         int err;
1641
1642         if (en) {
1643                 err = enetc_psfp_enable(priv);
1644                 if (err)
1645                         return err;
1646
1647                 priv->active_offloads |= ENETC_F_QCI;
1648                 return 0;
1649         }
1650
1651         err = enetc_psfp_disable(priv);
1652         if (err)
1653                 return err;
1654
1655         priv->active_offloads &= ~ENETC_F_QCI;
1656
1657         return 0;
1658 }
1659
1660 static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
1661 {
1662         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1663         int i;
1664
1665         for (i = 0; i < priv->num_rx_rings; i++)
1666                 enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
1667 }
1668
1669 static void enetc_enable_txvlan(struct net_device *ndev, bool en)
1670 {
1671         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1672         int i;
1673
1674         for (i = 0; i < priv->num_tx_rings; i++)
1675                 enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
1676 }
1677
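     /* Apply only the offload features that changed relative to the
      * current netdev features: RSS hashing, Rx/Tx VLAN offload and
      * tc/PSFP.
      */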
1678 int enetc_set_features(struct net_device *ndev,
1679                        netdev_features_t features)
1680 {
1681         netdev_features_t changed = ndev->features ^ features;
1682         int err = 0;
1683
1684         if (changed & NETIF_F_RXHASH)
1685                 enetc_set_rss(ndev, !!(features & NETIF_F_RXHASH));
1686
1687         if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1688                 enetc_enable_rxvlan(ndev,
1689                                     !!(features & NETIF_F_HW_VLAN_CTAG_RX));
1690
1691         if (changed & NETIF_F_HW_VLAN_CTAG_TX)
1692                 enetc_enable_txvlan(ndev,
1693                                     !!(features & NETIF_F_HW_VLAN_CTAG_TX));
1694
1695         if (changed & NETIF_F_HW_TC)
1696                 err = enetc_set_psfp(ndev, !!(features & NETIF_F_HW_TC));
1697
1698         return err;
1699 }
1700
1701 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
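     /* SIOCSHWTSTAMP handler: update the Tx/Rx timestamping offload
      * flags and, if the interface is running and the flags changed,
      * close and reopen it so the rings pick up the new configuration.
      */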
1702 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
1703 {
1704         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1705         struct hwtstamp_config config;
1706         int ao;
1707
1708         if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
1709                 return -EFAULT;
1710
1711         switch (config.tx_type) {
1712         case HWTSTAMP_TX_OFF:
1713                 priv->active_offloads &= ~ENETC_F_TX_TSTAMP;
1714                 break;
1715         case HWTSTAMP_TX_ON:
1716                 priv->active_offloads |= ENETC_F_TX_TSTAMP;
1717                 break;
1718         default:
1719                 return -ERANGE;
1720         }
1721
1722         ao = priv->active_offloads;
1723         switch (config.rx_filter) {
1724         case HWTSTAMP_FILTER_NONE:
1725                 priv->active_offloads &= ~ENETC_F_RX_TSTAMP;
1726                 break;
1727         default:
1728                 priv->active_offloads |= ENETC_F_RX_TSTAMP;
1729                 config.rx_filter = HWTSTAMP_FILTER_ALL;
1730         }
1731
1732         if (netif_running(ndev) && ao != priv->active_offloads) {
1733                 enetc_close(ndev);
1734                 enetc_open(ndev);
1735         }
1736
1737         return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1738                -EFAULT : 0;
1739 }
1740
1741 static int enetc_hwtstamp_get(struct net_device *ndev, struct ifreq *ifr)
1742 {
1743         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1744         struct hwtstamp_config config;
1745
1746         config.flags = 0;
1747
1748         if (priv->active_offloads & ENETC_F_TX_TSTAMP)
1749                 config.tx_type = HWTSTAMP_TX_ON;
1750         else
1751                 config.tx_type = HWTSTAMP_TX_OFF;
1752
1753         config.rx_filter = (priv->active_offloads & ENETC_F_RX_TSTAMP) ?
1754                             HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
1755
1756         return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
1757                -EFAULT : 0;
1758 }
1759 #endif
1760
1761 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
1762 {
1763         struct enetc_ndev_priv *priv = netdev_priv(ndev);
1764 #ifdef CONFIG_FSL_ENETC_PTP_CLOCK
1765         if (cmd == SIOCSHWTSTAMP)
1766                 return enetc_hwtstamp_set(ndev, rq);
1767         if (cmd == SIOCGHWTSTAMP)
1768                 return enetc_hwtstamp_get(ndev, rq);
1769 #endif
1770
1771         if (!priv->phylink)
1772                 return -EOPNOTSUPP;
1773
1774         return phylink_mii_ioctl(priv->phylink, rq, cmd);
1775 }
1776
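     /* Allocate exactly one MSI-X vector per BD ring interrupt plus the
      * base messaging vectors, then build the interrupt vector objects:
      * each vector owns one Rx ring and an equal share of the Tx rings.
      */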
1777 int enetc_alloc_msix(struct enetc_ndev_priv *priv)
1778 {
1779         struct pci_dev *pdev = priv->si->pdev;
1780         int v_tx_rings;
1781         int i, n, err, nvec;
1782
1783         nvec = ENETC_BDR_INT_BASE_IDX + priv->bdr_int_num;
1784         /* allocate MSIX for both messaging and Rx/Tx interrupts */
1785         n = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
1786
1787         if (n < 0)
1788                 return n;
1789
1790         if (n != nvec)
1791                 return -EPERM;
1792
1793         /* number of Tx rings served by each interrupt vector */
1794         v_tx_rings = priv->num_tx_rings / priv->bdr_int_num;
1795
1796         for (i = 0; i < priv->bdr_int_num; i++) {
1797                 struct enetc_int_vector *v;
1798                 struct enetc_bdr *bdr;
1799                 int j;
1800
1801                 v = kzalloc(struct_size(v, tx_ring, v_tx_rings), GFP_KERNEL);
1802                 if (!v) {
1803                         err = -ENOMEM;
1804                         goto fail;
1805                 }
1806
1807                 priv->int_vector[i] = v;
1808
1809                 /* init defaults for adaptive interrupt coalescing */
1810                 if (priv->ic_mode & ENETC_IC_RX_ADAPTIVE) {
1811                         v->rx_ictt = 0x1;
1812                         v->rx_dim_en = true;
1813                 }
1814                 INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
1815                 netif_napi_add(priv->ndev, &v->napi, enetc_poll,
1816                                NAPI_POLL_WEIGHT);
1817                 v->count_tx_rings = v_tx_rings;
1818
1819                 for (j = 0; j < v_tx_rings; j++) {
1820                         int idx;
1821
1822                         /* default Tx ring to interrupt vector mapping policy */
1823                         if (priv->bdr_int_num == ENETC_MAX_BDR_INT)
1824                                 idx = 2 * j + i; /* 2 CPUs: interleave rings across the two vectors */
1825                         else
1826                                 idx = j + i * v_tx_rings; /* contiguous block of rings per vector */
1827
1828                         __set_bit(idx, &v->tx_rings_map);
1829                         bdr = &v->tx_ring[j];
1830                         bdr->index = idx;
1831                         bdr->ndev = priv->ndev;
1832                         bdr->dev = priv->dev;
1833                         bdr->bd_count = priv->tx_bd_count;
1834                         priv->tx_ring[idx] = bdr;
1835                 }
1836
1837                 bdr = &v->rx_ring;
1838                 bdr->index = i;
1839                 bdr->ndev = priv->ndev;
1840                 bdr->dev = priv->dev;
1841                 bdr->bd_count = priv->rx_bd_count;
1842                 priv->rx_ring[i] = bdr;
1843         }
1844
1845         return 0;
1846
1847 fail:
1848         while (i--) {
1849                 netif_napi_del(&priv->int_vector[i]->napi);
1850                 cancel_work_sync(&priv->int_vector[i]->rx_dim.work);
1851                 kfree(priv->int_vector[i]);
1852         }
1853
1854         pci_free_irq_vectors(pdev);
1855
1856         return err;
1857 }
1858
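     /* Undo enetc_alloc_msix(): delete the NAPI instances, clear the ring
      * pointers, free the interrupt vector objects and release the MSI-X
      * vectors.
      */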
1859 void enetc_free_msix(struct enetc_ndev_priv *priv)
1860 {
1861         int i;
1862
1863         for (i = 0; i < priv->bdr_int_num; i++) {
1864                 struct enetc_int_vector *v = priv->int_vector[i];
1865
1866                 netif_napi_del(&v->napi);
1867                 cancel_work_sync(&v->rx_dim.work);
1868         }
1869
1870         for (i = 0; i < priv->num_rx_rings; i++)
1871                 priv->rx_ring[i] = NULL;
1872
1873         for (i = 0; i < priv->num_tx_rings; i++)
1874                 priv->tx_ring[i] = NULL;
1875
1876         for (i = 0; i < priv->bdr_int_num; i++) {
1877                 kfree(priv->int_vector[i]);
1878                 priv->int_vector[i] = NULL;
1879         }
1880
1881         /* disable all MSIX for this device */
1882         pci_free_irq_vectors(priv->si->pdev);
1883 }
1884
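     /* The enetc_si pointer was aligned up inside a larger kzalloc'ed
      * buffer; step back over the recorded padding to free the original
      * allocation (see enetc_pci_probe()).
      */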
1885 static void enetc_kfree_si(struct enetc_si *si)
1886 {
1887         char *p = (char *)si - si->pad;
1888
1889         kfree(p);
1890 }
1891
1892 static void enetc_detect_errata(struct enetc_si *si)
1893 {
1894         if (si->pdev->revision == ENETC_REV1)
1895                 si->errata = ENETC_ERR_VLAN_ISOL | ENETC_ERR_UCMCSWP;
1896 }
1897
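     /* Common PCI probe helper: reset and enable the function, set the
      * DMA mask, map the register BAR and allocate the enetc_si (plus an
      * optional private area) aligned to ENETC_SI_ALIGN.
      */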
1898 int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
1899 {
1900         struct enetc_si *si, *p;
1901         struct enetc_hw *hw;
1902         size_t alloc_size;
1903         int err, len;
1904
1905         pcie_flr(pdev);
1906         err = pci_enable_device_mem(pdev);
1907         if (err) {
1908                 dev_err(&pdev->dev, "device enable failed\n");
1909                 return err;
1910         }
1911
1912         /* set up for 64-bit DMA, falling back to 32-bit if unsupported */
1913         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1914         if (err) {
1915                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1916                 if (err) {
1917                         dev_err(&pdev->dev,
1918                                 "DMA configuration failed: 0x%x\n", err);
1919                         goto err_dma;
1920                 }
1921         }
1922
1923         err = pci_request_mem_regions(pdev, name);
1924         if (err) {
1925                 dev_err(&pdev->dev, "pci_request_mem_regions failed err=%d\n", err);
1926                 goto err_pci_mem_reg;
1927         }
1928
1929         pci_set_master(pdev);
1930
1931         alloc_size = sizeof(struct enetc_si);
1932         if (sizeof_priv) {
1933                 /* align priv to 32B */
1934                 alloc_size = ALIGN(alloc_size, ENETC_SI_ALIGN);
1935                 alloc_size += sizeof_priv;
1936         }
1937         /* add slack so the enetc_si pointer can be aligned up to 32B */
1938         alloc_size += ENETC_SI_ALIGN - 1;
1939
1940         p = kzalloc(alloc_size, GFP_KERNEL);
1941         if (!p) {
1942                 err = -ENOMEM;
1943                 goto err_alloc_si;
1944         }
1945
1946         si = PTR_ALIGN(p, ENETC_SI_ALIGN);
1947         si->pad = (char *)si - (char *)p;
1948
1949         pci_set_drvdata(pdev, si);
1950         si->pdev = pdev;
1951         hw = &si->hw;
1952
1953         len = pci_resource_len(pdev, ENETC_BAR_REGS);
1954         hw->reg = ioremap(pci_resource_start(pdev, ENETC_BAR_REGS), len);
1955         if (!hw->reg) {
1956                 err = -ENXIO;
1957                 dev_err(&pdev->dev, "ioremap() failed\n");
1958                 goto err_ioremap;
1959         }
1960         if (len > ENETC_PORT_BASE)
1961                 hw->port = hw->reg + ENETC_PORT_BASE;
1962         if (len > ENETC_GLOBAL_BASE)
1963                 hw->global = hw->reg + ENETC_GLOBAL_BASE;
1964
1965         enetc_detect_errata(si);
1966
1967         return 0;
1968
1969 err_ioremap:
1970         enetc_kfree_si(si);
1971 err_alloc_si:
1972         pci_release_mem_regions(pdev);
1973 err_pci_mem_reg:
1974 err_dma:
1975         pci_disable_device(pdev);
1976
1977         return err;
1978 }
1979
1980 void enetc_pci_remove(struct pci_dev *pdev)
1981 {
1982         struct enetc_si *si = pci_get_drvdata(pdev);
1983         struct enetc_hw *hw = &si->hw;
1984
1985         iounmap(hw->reg);
1986         enetc_kfree_si(si);
1987         pci_release_mem_regions(pdev);
1988         pci_disable_device(pdev);
1989 }