drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c
// SPDX-License-Identifier: GPL-2.0+

#include <linux/bpf.h>
#include <linux/filter.h>

#include "lan966x_main.h"

static int lan966x_fdma_channel_active(struct lan966x *lan966x)
{
        return lan_rd(lan966x, FDMA_CH_ACTIVE);
}

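/* Allocate a page from the RX page pool and program its DMA address,
 * offset by XDP_PACKET_HEADROOM, into the DB dataptr.
 */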
static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
                                               struct lan966x_db *db)
{
        struct page *page;

        page = page_pool_dev_alloc_pages(rx->page_pool);
        if (unlikely(!page))
                return NULL;

        db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;

        return page;
}

static void lan966x_fdma_rx_free_pages(struct lan966x_rx *rx)
{
        int i, j;

        for (i = 0; i < FDMA_DCB_MAX; ++i) {
                for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j)
                        page_pool_put_full_page(rx->page_pool,
                                                rx->page[i][j], false);
        }
}

static void lan966x_fdma_rx_free_page(struct lan966x_rx *rx)
{
        struct page *page;

        page = rx->page[rx->dcb_index][rx->db_index];
        if (unlikely(!page))
                return;

        page_pool_recycle_direct(rx->page_pool, page);
}

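/* Reset the DB status of @dcb, make it the new tail of the RX DCB chain and
 * link the previous tail to it through @nextptr.
 */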
static void lan966x_fdma_rx_add_dcb(struct lan966x_rx *rx,
                                    struct lan966x_rx_dcb *dcb,
                                    u64 nextptr)
{
        struct lan966x_db *db;
        int i;

        for (i = 0; i < FDMA_RX_DCB_MAX_DBS; ++i) {
                db = &dcb->db[i];
                db->status = FDMA_DCB_STATUS_INTR;
        }

        dcb->nextptr = FDMA_DCB_INVALID_DATA;
        dcb->info = FDMA_DCB_INFO_DATAL(PAGE_SIZE << rx->page_order);

        rx->last_entry->nextptr = nextptr;
        rx->last_entry = dcb;
}

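/* Create the RX page pool and (re)register it as the XDP memory model of
 * every port. When an XDP program is attached the pages are mapped
 * DMA_BIDIRECTIONAL, since XDP_TX transmits directly out of the RX pages.
 */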
static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct page_pool_params pp_params = {
                .order = rx->page_order,
                .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
                .pool_size = FDMA_DCB_MAX,
                .nid = NUMA_NO_NODE,
                .dev = lan966x->dev,
                .dma_dir = DMA_FROM_DEVICE,
                .offset = XDP_PACKET_HEADROOM,
                .max_len = rx->max_mtu -
                           SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
        };

        if (lan966x_xdp_present(lan966x))
                pp_params.dma_dir = DMA_BIDIRECTIONAL;

        rx->page_pool = page_pool_create(&pp_params);

        for (int i = 0; i < lan966x->num_phys_ports; ++i) {
                struct lan966x_port *port;

                if (!lan966x->ports[i])
                        continue;

                port = lan966x->ports[i];
                xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
                xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
                                           rx->page_pool);
        }

        return PTR_ERR_OR_ZERO(rx->page_pool);
}

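/* Allocate the RX resources: the page pool, one page-aligned coherent area
 * holding FDMA_DCB_MAX DCBs, and one page per DB, then chain all the DCBs
 * together.
 */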
static int lan966x_fdma_rx_alloc(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        struct lan966x_rx_dcb *dcb;
        struct lan966x_db *db;
        struct page *page;
        int i, j;
        int size;

        if (lan966x_fdma_rx_alloc_page_pool(rx))
                return PTR_ERR(rx->page_pool);

        /* calculate how many pages are needed to allocate the dcbs */
        size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
        size = ALIGN(size, PAGE_SIZE);

        rx->dcbs = dma_alloc_coherent(lan966x->dev, size, &rx->dma, GFP_KERNEL);
        if (!rx->dcbs)
                return -ENOMEM;

        rx->last_entry = rx->dcbs;
        rx->db_index = 0;
        rx->dcb_index = 0;

        /* Now for each dcb allocate the dbs */
        for (i = 0; i < FDMA_DCB_MAX; ++i) {
                dcb = &rx->dcbs[i];
                dcb->info = 0;

                /* For each db allocate a page and map it to the DB dataptr. */
                for (j = 0; j < FDMA_RX_DCB_MAX_DBS; ++j) {
                        db = &dcb->db[j];
                        page = lan966x_fdma_rx_alloc_page(rx, db);
                        if (!page)
                                return -ENOMEM;

                        db->status = 0;
                        rx->page[i][j] = page;
                }

                lan966x_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * i);
        }

        return 0;
}

static void lan966x_fdma_rx_advance_dcb(struct lan966x_rx *rx)
{
        rx->dcb_index++;
        rx->dcb_index &= FDMA_DCB_MAX - 1;
}

static void lan966x_fdma_rx_free(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        u32 size;

        /* Now it is possible to do the cleanup of dcb */
        size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
        size = ALIGN(size, PAGE_SIZE);
        dma_free_coherent(lan966x->dev, size, rx->dcbs, rx->dma);
}

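/* Point the channel LLP registers at the first DCB, configure the channel,
 * unblock extraction, enable the DB interrupt for this channel and finally
 * activate it.
 */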
static void lan966x_fdma_rx_start(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        u32 mask;

        /* When activating a channel, it is first required to write the first
         * DCB address and then to activate it
         */
        lan_wr(lower_32_bits((u64)rx->dma), lan966x,
               FDMA_DCB_LLP(rx->channel_id));
        lan_wr(upper_32_bits((u64)rx->dma), lan966x,
               FDMA_DCB_LLP1(rx->channel_id));

        lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) |
               FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
               FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
               FDMA_CH_CFG_CH_MEM_SET(1),
               lan966x, FDMA_CH_CFG(rx->channel_id));

        /* Start fdma */
        lan_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0),
                FDMA_PORT_CTRL_XTR_STOP,
                lan966x, FDMA_PORT_CTRL(0));

        /* Enable interrupts */
        mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
        mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
        mask |= BIT(rx->channel_id);
        lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
                FDMA_INTR_DB_ENA_INTR_DB_ENA,
                lan966x, FDMA_INTR_DB_ENA);

        /* Activate the channel */
        lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(rx->channel_id)),
                FDMA_CH_ACTIVATE_CH_ACTIVATE,
                lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_rx_disable(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;
        u32 val;

        /* Disable the channel */
        lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(rx->channel_id)),
                FDMA_CH_DISABLE_CH_DISABLE,
                lan966x, FDMA_CH_DISABLE);

        readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                                  val, !(val & BIT(rx->channel_id)),
                                  READL_SLEEP_US, READL_TIMEOUT_US);

        lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(rx->channel_id)),
                FDMA_CH_DB_DISCARD_DB_DISCARD,
                lan966x, FDMA_CH_DB_DISCARD);
}

static void lan966x_fdma_rx_reload(struct lan966x_rx *rx)
{
        struct lan966x *lan966x = rx->lan966x;

        lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(rx->channel_id)),
                FDMA_CH_RELOAD_CH_RELOAD,
                lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_tx_add_dcb(struct lan966x_tx *tx,
                                    struct lan966x_tx_dcb *dcb)
{
        dcb->nextptr = FDMA_DCB_INVALID_DATA;
        dcb->info = 0;
}

static int lan966x_fdma_tx_alloc(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        struct lan966x_tx_dcb *dcb;
        struct lan966x_db *db;
        int size;
        int i, j;

        tx->dcbs_buf = kcalloc(FDMA_DCB_MAX, sizeof(struct lan966x_tx_dcb_buf),
                               GFP_KERNEL);
        if (!tx->dcbs_buf)
                return -ENOMEM;

        /* calculate how many pages are needed to allocate the dcbs */
        size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
        size = ALIGN(size, PAGE_SIZE);
        tx->dcbs = dma_alloc_coherent(lan966x->dev, size, &tx->dma, GFP_KERNEL);
        if (!tx->dcbs)
                goto out;

        /* Now for each dcb allocate the db */
        for (i = 0; i < FDMA_DCB_MAX; ++i) {
                dcb = &tx->dcbs[i];

                for (j = 0; j < FDMA_TX_DCB_MAX_DBS; ++j) {
                        db = &dcb->db[j];
                        db->dataptr = 0;
                        db->status = 0;
                }

                lan966x_fdma_tx_add_dcb(tx, dcb);
        }

        return 0;

out:
        kfree(tx->dcbs_buf);
        return -ENOMEM;
}

static void lan966x_fdma_tx_free(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        int size;

        kfree(tx->dcbs_buf);

        size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX;
        size = ALIGN(size, PAGE_SIZE);
        dma_free_coherent(lan966x->dev, size, tx->dcbs, tx->dma);
}

static void lan966x_fdma_tx_activate(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        u32 mask;

        /* When activating a channel, it is first required to write the first
         * DCB address and then to activate it
         */
        lan_wr(lower_32_bits((u64)tx->dma), lan966x,
               FDMA_DCB_LLP(tx->channel_id));
        lan_wr(upper_32_bits((u64)tx->dma), lan966x,
               FDMA_DCB_LLP1(tx->channel_id));

        lan_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) |
               FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) |
               FDMA_CH_CFG_CH_INJ_PORT_SET(0) |
               FDMA_CH_CFG_CH_MEM_SET(1),
               lan966x, FDMA_CH_CFG(tx->channel_id));

        /* Start fdma */
        lan_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0),
                FDMA_PORT_CTRL_INJ_STOP,
                lan966x, FDMA_PORT_CTRL(0));

        /* Enable interrupts */
        mask = lan_rd(lan966x, FDMA_INTR_DB_ENA);
        mask = FDMA_INTR_DB_ENA_INTR_DB_ENA_GET(mask);
        mask |= BIT(tx->channel_id);
        lan_rmw(FDMA_INTR_DB_ENA_INTR_DB_ENA_SET(mask),
                FDMA_INTR_DB_ENA_INTR_DB_ENA,
                lan966x, FDMA_INTR_DB_ENA);

        /* Activate the channel */
        lan_rmw(FDMA_CH_ACTIVATE_CH_ACTIVATE_SET(BIT(tx->channel_id)),
                FDMA_CH_ACTIVATE_CH_ACTIVATE,
                lan966x, FDMA_CH_ACTIVATE);
}

static void lan966x_fdma_tx_disable(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;
        u32 val;

        /* Disable the channel */
        lan_rmw(FDMA_CH_DISABLE_CH_DISABLE_SET(BIT(tx->channel_id)),
                FDMA_CH_DISABLE_CH_DISABLE,
                lan966x, FDMA_CH_DISABLE);

        readx_poll_timeout_atomic(lan966x_fdma_channel_active, lan966x,
                                  val, !(val & BIT(tx->channel_id)),
                                  READL_SLEEP_US, READL_TIMEOUT_US);

        lan_rmw(FDMA_CH_DB_DISCARD_DB_DISCARD_SET(BIT(tx->channel_id)),
                FDMA_CH_DB_DISCARD_DB_DISCARD,
                lan966x, FDMA_CH_DB_DISCARD);

        tx->activated = false;
        tx->last_in_use = -1;
}

static void lan966x_fdma_tx_reload(struct lan966x_tx *tx)
{
        struct lan966x *lan966x = tx->lan966x;

        /* Write the registers to reload the channel */
        lan_rmw(FDMA_CH_RELOAD_CH_RELOAD_SET(BIT(tx->channel_id)),
                FDMA_CH_RELOAD_CH_RELOAD,
                lan966x, FDMA_CH_RELOAD);
}

static void lan966x_fdma_wakeup_netdev(struct lan966x *lan966x)
{
        struct lan966x_port *port;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                port = lan966x->ports[i];
                if (!port)
                        continue;

                if (netif_queue_stopped(port->dev))
                        netif_wake_queue(port->dev);
        }
}

static void lan966x_fdma_stop_netdev(struct lan966x *lan966x)
{
        struct lan966x_port *port;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                port = lan966x->ports[i];
                if (!port)
                        continue;

                netif_stop_queue(port->dev);
        }
}

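/* Reclaim the TX DCBs that the hardware has marked as done: update the
 * stats, unmap and free the skb or xdp_frame, and wake the netdev queues if
 * anything was cleaned.
 */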
static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
        struct lan966x_tx *tx = &lan966x->tx;
        struct lan966x_tx_dcb_buf *dcb_buf;
        struct xdp_frame_bulk bq;
        struct lan966x_db *db;
        unsigned long flags;
        bool clear = false;
        int i;

        xdp_frame_bulk_init(&bq);

        spin_lock_irqsave(&lan966x->tx_lock, flags);
        for (i = 0; i < FDMA_DCB_MAX; ++i) {
                dcb_buf = &tx->dcbs_buf[i];

                if (!dcb_buf->used)
                        continue;

                db = &tx->dcbs[i].db[0];
                if (!(db->status & FDMA_DCB_STATUS_DONE))
                        continue;

                dcb_buf->dev->stats.tx_packets++;
                dcb_buf->dev->stats.tx_bytes += dcb_buf->len;

                dcb_buf->used = false;
                if (dcb_buf->use_skb) {
                        dma_unmap_single(lan966x->dev,
                                         dcb_buf->dma_addr,
                                         dcb_buf->len,
                                         DMA_TO_DEVICE);

                        if (!dcb_buf->ptp)
                                napi_consume_skb(dcb_buf->data.skb, weight);
                } else {
                        if (dcb_buf->xdp_ndo)
                                dma_unmap_single(lan966x->dev,
                                                 dcb_buf->dma_addr,
                                                 dcb_buf->len,
                                                 DMA_TO_DEVICE);

                        if (dcb_buf->xdp_ndo)
                                xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
                        else
                                xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
                }

                clear = true;
        }

        xdp_flush_frame_bulk(&bq);

        if (clear)
                lan966x_fdma_wakeup_netdev(lan966x);

        spin_unlock_irqrestore(&lan966x->tx_lock, flags);
}

static bool lan966x_fdma_rx_more_frames(struct lan966x_rx *rx)
{
        struct lan966x_db *db;

        /* Check if there is any data */
        db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
        if (unlikely(!(db->status & FDMA_DCB_STATUS_DONE)))
                return false;

        return true;
}

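/* Sync the received buffer for the CPU, read the source port out of the IFH
 * and, if the port has an XDP program attached, run it on the frame.
 * Returns one of the FDMA_* verdicts.
 */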
static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
{
        struct lan966x *lan966x = rx->lan966x;
        struct lan966x_port *port;
        struct lan966x_db *db;
        struct page *page;

        db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
        page = rx->page[rx->dcb_index][rx->db_index];
        if (unlikely(!page))
                return FDMA_ERROR;

        dma_sync_single_for_cpu(lan966x->dev,
                                (dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
                                FDMA_DCB_STATUS_BLOCKL(db->status),
                                DMA_FROM_DEVICE);

        lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
                                 src_port);
        if (WARN_ON(*src_port >= lan966x->num_phys_ports))
                return FDMA_ERROR;

        port = lan966x->ports[*src_port];
        if (!lan966x_xdp_port_present(port))
                return FDMA_PASS;

        return lan966x_xdp_run(port, page, FDMA_DCB_STATUS_BLOCKL(db->status));
}

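/* Build an skb around the received page, pull the IFH, apply the hardware
 * RX timestamp and the bridge offload marking, and update the RX stats.
 * The page is recycled back to the pool if the skb cannot be built.
 */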
static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
                                                 u64 src_port)
{
        struct lan966x *lan966x = rx->lan966x;
        struct lan966x_db *db;
        struct sk_buff *skb;
        struct page *page;
        u64 timestamp;

        /* Get the received frame and unmap it */
        db = &rx->dcbs[rx->dcb_index].db[rx->db_index];
        page = rx->page[rx->dcb_index][rx->db_index];

        skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order);
        if (unlikely(!skb))
                goto free_page;

        skb_mark_for_recycle(skb);

        skb_reserve(skb, XDP_PACKET_HEADROOM);
        skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));

        lan966x_ifh_get_timestamp(skb->data, &timestamp);

        skb->dev = lan966x->ports[src_port]->dev;
        skb_pull(skb, IFH_LEN_BYTES);

        if (likely(!(skb->dev->features & NETIF_F_RXFCS)))
                skb_trim(skb, skb->len - ETH_FCS_LEN);

        lan966x_ptp_rxtstamp(lan966x, skb, timestamp);
        skb->protocol = eth_type_trans(skb, skb->dev);

        if (lan966x->bridge_mask & BIT(src_port)) {
                skb->offload_fwd_mark = 1;

                skb_reset_network_header(skb);
                if (!lan966x_hw_offload(lan966x, src_port, skb))
                        skb->offload_fwd_mark = 0;
        }

        skb->dev->stats.rx_bytes += skb->len;
        skb->dev->stats.rx_packets++;

        return skb;

free_page:
        page_pool_recycle_direct(rx->page_pool, page);

        return NULL;
}

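/* NAPI poll handler: clean the TX ring first, then process up to @weight
 * received frames, refill the consumed DCBs with fresh pages and re-enable
 * the DB interrupts once the budget is not exhausted.
 */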
static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
{
        struct lan966x *lan966x = container_of(napi, struct lan966x, napi);
        struct lan966x_rx *rx = &lan966x->rx;
        int dcb_reload = rx->dcb_index;
        struct lan966x_rx_dcb *old_dcb;
        struct lan966x_db *db;
        bool redirect = false;
        struct sk_buff *skb;
        struct page *page;
        int counter = 0;
        u64 src_port;
        u64 nextptr;

        lan966x_fdma_tx_clear_buf(lan966x, weight);

        /* Get all received skb */
        while (counter < weight) {
                if (!lan966x_fdma_rx_more_frames(rx))
                        break;

                counter++;

                switch (lan966x_fdma_rx_check_frame(rx, &src_port)) {
                case FDMA_PASS:
                        break;
                case FDMA_ERROR:
                        lan966x_fdma_rx_free_page(rx);
                        lan966x_fdma_rx_advance_dcb(rx);
                        goto allocate_new;
                case FDMA_REDIRECT:
                        redirect = true;
                        fallthrough;
                case FDMA_TX:
                        lan966x_fdma_rx_advance_dcb(rx);
                        continue;
                case FDMA_DROP:
                        lan966x_fdma_rx_free_page(rx);
                        lan966x_fdma_rx_advance_dcb(rx);
                        continue;
                }

                skb = lan966x_fdma_rx_get_frame(rx, src_port);
                lan966x_fdma_rx_advance_dcb(rx);
                if (!skb)
                        goto allocate_new;

                napi_gro_receive(&lan966x->napi, skb);
        }

allocate_new:
        /* Allocate new pages and map them */
        while (dcb_reload != rx->dcb_index) {
                db = &rx->dcbs[dcb_reload].db[rx->db_index];
                page = lan966x_fdma_rx_alloc_page(rx, db);
                if (unlikely(!page))
                        break;
                rx->page[dcb_reload][rx->db_index] = page;

                old_dcb = &rx->dcbs[dcb_reload];
                dcb_reload++;
                dcb_reload &= FDMA_DCB_MAX - 1;

                nextptr = rx->dma + ((unsigned long)old_dcb -
                                     (unsigned long)rx->dcbs);
                lan966x_fdma_rx_add_dcb(rx, old_dcb, nextptr);
                lan966x_fdma_rx_reload(rx);
        }

        if (counter < weight && napi_complete_done(napi, counter))
                lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);

        if (redirect)
                xdp_do_flush();

        return counter;
}

irqreturn_t lan966x_fdma_irq_handler(int irq, void *args)
{
        struct lan966x *lan966x = args;
        u32 db, err, err_type;

        db = lan_rd(lan966x, FDMA_INTR_DB);
        err = lan_rd(lan966x, FDMA_INTR_ERR);

        if (db) {
                lan_wr(0, lan966x, FDMA_INTR_DB_ENA);
                lan_wr(db, lan966x, FDMA_INTR_DB);

                napi_schedule(&lan966x->napi);
        }

        if (err) {
                err_type = lan_rd(lan966x, FDMA_ERRORS);

                WARN(1, "Unexpected error: %d, error_type: %d\n", err, err_type);

                lan_wr(err, lan966x, FDMA_INTR_ERR);
                lan_wr(err_type, lan966x, FDMA_ERRORS);
        }

        return IRQ_HANDLED;
}

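/* Find a free TX DCB index, skipping the one that was used last so the
 * hardware can still be re-linked from it. Returns -1 if the ring is full.
 */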
static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
{
        struct lan966x_tx_dcb_buf *dcb_buf;
        int i;

        for (i = 0; i < FDMA_DCB_MAX; ++i) {
                dcb_buf = &tx->dcbs_buf[i];
                if (!dcb_buf->used && i != tx->last_in_use)
                        return i;
        }

        return -1;
}

static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
                                      int next_to_use, int len,
                                      dma_addr_t dma_addr)
{
        struct lan966x_tx_dcb *next_dcb;
        struct lan966x_db *next_db;

        next_dcb = &tx->dcbs[next_to_use];
        next_dcb->nextptr = FDMA_DCB_INVALID_DATA;

        next_db = &next_dcb->db[0];
        next_db->dataptr = dma_addr;
        next_db->status = FDMA_DCB_STATUS_SOF |
                          FDMA_DCB_STATUS_EOF |
                          FDMA_DCB_STATUS_INTR |
                          FDMA_DCB_STATUS_BLOCKO(0) |
                          FDMA_DCB_STATUS_BLOCKL(len);
}

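/* Start transmission of the DCB at @next_to_use: on an already active
 * channel, chain it after the last DCB in use and reload, otherwise activate
 * the channel for the first time.
 */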
static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
        struct lan966x *lan966x = tx->lan966x;
        struct lan966x_tx_dcb *dcb;

        if (likely(lan966x->tx.activated)) {
                /* Connect the current dcb to the next dcb */
                dcb = &tx->dcbs[tx->last_in_use];
                dcb->nextptr = tx->dma + (next_to_use *
                                          sizeof(struct lan966x_tx_dcb));

                lan966x_fdma_tx_reload(tx);
        } else {
                /* Because it is the first time, just activate */
                lan966x->tx.activated = true;
                lan966x_fdma_tx_activate(tx);
        }

        /* Move to the next dcb because this one is now the last in use */
        tx->last_in_use = next_to_use;
}

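/* Transmit an xdp_frame. When @dma_map is set (the ndo_xdp_xmit path) the
 * IFH is written in front of the frame data, which requires enough headroom,
 * and the buffer is mapped with dma_map_single(); otherwise the frame still
 * sits in a page pool page (@page) and only a DMA sync is needed. The
 * tx_lock is taken internally.
 */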
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
                           struct xdp_frame *xdpf,
                           struct page *page,
                           bool dma_map)
{
        struct lan966x *lan966x = port->lan966x;
        struct lan966x_tx_dcb_buf *next_dcb_buf;
        struct lan966x_tx *tx = &lan966x->tx;
        dma_addr_t dma_addr;
        int next_to_use;
        __be32 *ifh;
        int ret = 0;

        spin_lock(&lan966x->tx_lock);

        /* Get next index */
        next_to_use = lan966x_fdma_get_next_dcb(tx);
        if (next_to_use < 0) {
                netif_stop_queue(port->dev);
                ret = NETDEV_TX_BUSY;
                goto out;
        }

        /* Generate new IFH */
        if (dma_map) {
                if (xdpf->headroom < IFH_LEN_BYTES) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }

                ifh = xdpf->data - IFH_LEN_BYTES;
                memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
                lan966x_ifh_set_bypass(ifh, 1);
                lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

                dma_addr = dma_map_single(lan966x->dev,
                                          xdpf->data - IFH_LEN_BYTES,
                                          xdpf->len + IFH_LEN_BYTES,
                                          DMA_TO_DEVICE);
                if (dma_mapping_error(lan966x->dev, dma_addr)) {
                        ret = NETDEV_TX_OK;
                        goto out;
                }

                /* Setup next dcb */
                lan966x_fdma_tx_setup_dcb(tx, next_to_use,
                                          xdpf->len + IFH_LEN_BYTES,
                                          dma_addr);
        } else {
                ifh = page_address(page) + XDP_PACKET_HEADROOM;
                memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
                lan966x_ifh_set_bypass(ifh, 1);
                lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));

                dma_addr = page_pool_get_dma_addr(page);
                dma_sync_single_for_device(lan966x->dev,
                                           dma_addr + XDP_PACKET_HEADROOM,
                                           xdpf->len + IFH_LEN_BYTES,
                                           DMA_TO_DEVICE);

                /* Setup next dcb */
                lan966x_fdma_tx_setup_dcb(tx, next_to_use,
                                          xdpf->len + IFH_LEN_BYTES,
                                          dma_addr + XDP_PACKET_HEADROOM);
        }

        /* Fill up the buffer */
        next_dcb_buf = &tx->dcbs_buf[next_to_use];
        next_dcb_buf->use_skb = false;
        next_dcb_buf->data.xdpf = xdpf;
        next_dcb_buf->xdp_ndo = dma_map;
        next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
        next_dcb_buf->dma_addr = dma_addr;
        next_dcb_buf->used = true;
        next_dcb_buf->ptp = false;
        next_dcb_buf->dev = port->dev;

        /* Start the transmission */
        lan966x_fdma_tx_start(tx, next_to_use);

out:
        spin_unlock(&lan966x->tx_lock);

        return ret;
}

int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
        struct lan966x_port *port = netdev_priv(dev);
        struct lan966x *lan966x = port->lan966x;
        struct lan966x_tx_dcb_buf *next_dcb_buf;
        struct lan966x_tx *tx = &lan966x->tx;
        int needed_headroom;
        int needed_tailroom;
        dma_addr_t dma_addr;
        int next_to_use;
        int err;

        /* Get next index */
        next_to_use = lan966x_fdma_get_next_dcb(tx);
        if (next_to_use < 0) {
                netif_stop_queue(dev);
                return NETDEV_TX_BUSY;
        }

        if (skb_put_padto(skb, ETH_ZLEN)) {
                dev->stats.tx_dropped++;
                return NETDEV_TX_OK;
        }

        /* skb processing */
        needed_headroom = max_t(int, IFH_LEN_BYTES - skb_headroom(skb), 0);
        needed_tailroom = max_t(int, ETH_FCS_LEN - skb_tailroom(skb), 0);
        if (needed_headroom || needed_tailroom || skb_header_cloned(skb)) {
                err = pskb_expand_head(skb, needed_headroom, needed_tailroom,
                                       GFP_ATOMIC);
                if (unlikely(err)) {
                        dev->stats.tx_dropped++;
                        err = NETDEV_TX_OK;
                        goto release;
                }
        }

        skb_tx_timestamp(skb);
        skb_push(skb, IFH_LEN_BYTES);
        memcpy(skb->data, ifh, IFH_LEN_BYTES);
        skb_put(skb, 4);

        dma_addr = dma_map_single(lan966x->dev, skb->data, skb->len,
                                  DMA_TO_DEVICE);
        if (dma_mapping_error(lan966x->dev, dma_addr)) {
                dev->stats.tx_dropped++;
                err = NETDEV_TX_OK;
                goto release;
        }

        /* Setup next dcb */
        lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);

        /* Fill up the buffer */
        next_dcb_buf = &tx->dcbs_buf[next_to_use];
        next_dcb_buf->use_skb = true;
        next_dcb_buf->data.skb = skb;
        next_dcb_buf->xdp_ndo = false;
        next_dcb_buf->len = skb->len;
        next_dcb_buf->dma_addr = dma_addr;
        next_dcb_buf->used = true;
        next_dcb_buf->ptp = false;
        next_dcb_buf->dev = dev;

        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
            LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
                next_dcb_buf->ptp = true;

        /* Start the transmission */
        lan966x_fdma_tx_start(tx, next_to_use);

        return NETDEV_TX_OK;

release:
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
            LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
                lan966x_ptp_txtstamp_release(port, skb);

        dev_kfree_skb_any(skb);
        return err;
}

static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x)
{
        int max_mtu = 0;
        int i;

        for (i = 0; i < lan966x->num_phys_ports; ++i) {
                struct lan966x_port *port;
                int mtu;

                port = lan966x->ports[i];
                if (!port)
                        continue;

                mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
                if (mtu > max_mtu)
                        max_mtu = mtu;
        }

        return max_mtu;
}

static int lan966x_qsys_sw_status(struct lan966x *lan966x)
{
        return lan_rd(lan966x, QSYS_SW_STATUS(CPU_PORT));
}

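/* Rebuild the RX path for a new maximum frame size: stop NAPI and the
 * netdevs, tear down the current RX channel, allocate a new page pool and
 * DCB ring sized for @new_mtu, then free the old resources. On allocation
 * failure the previous page pool and DCB ring are restored.
 */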
static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu)
{
        struct page_pool *page_pool;
        dma_addr_t rx_dma;
        void *rx_dcbs;
        u32 size;
        int err;

        /* Store these for later to free them */
        rx_dma = lan966x->rx.dma;
        rx_dcbs = lan966x->rx.dcbs;
        page_pool = lan966x->rx.page_pool;

        napi_synchronize(&lan966x->napi);
        napi_disable(&lan966x->napi);
        lan966x_fdma_stop_netdev(lan966x);

        lan966x_fdma_rx_disable(&lan966x->rx);
        lan966x_fdma_rx_free_pages(&lan966x->rx);
        lan966x->rx.page_order = round_up(new_mtu, PAGE_SIZE) / PAGE_SIZE - 1;
        lan966x->rx.max_mtu = new_mtu;
        err = lan966x_fdma_rx_alloc(&lan966x->rx);
        if (err)
                goto restore;
        lan966x_fdma_rx_start(&lan966x->rx);

        size = sizeof(struct lan966x_rx_dcb) * FDMA_DCB_MAX;
        size = ALIGN(size, PAGE_SIZE);
        dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma);

        page_pool_destroy(page_pool);

        lan966x_fdma_wakeup_netdev(lan966x);
        napi_enable(&lan966x->napi);

        return err;
restore:
        lan966x->rx.page_pool = page_pool;
        lan966x->rx.dma = rx_dma;
        lan966x->rx.dcbs = rx_dcbs;
        lan966x_fdma_rx_start(&lan966x->rx);

        return err;
}

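/* Worst-case buffer size for a received frame: the largest port MTU plus the
 * IFH, the shared info, two VLAN tags and the XDP headroom.
 */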
static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
{
        return lan966x_fdma_get_max_mtu(lan966x) +
               IFH_LEN_BYTES +
               SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
               VLAN_HLEN * 2 +
               XDP_PACKET_HEADROOM;
}

static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
        int err;
        u32 val;

        /* Disable the CPU port */
        lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
                QSYS_SW_PORT_MODE_PORT_ENA,
                lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

        /* Flush the CPU queues */
        readx_poll_timeout(lan966x_qsys_sw_status, lan966x,
                           val, !(QSYS_SW_STATUS_EQ_AVAIL_GET(val)),
                           READL_SLEEP_US, READL_TIMEOUT_US);

        /* Add a sleep in case there are frames between the queues and the CPU
         * port
         */
        usleep_range(1000, 2000);

        err = lan966x_fdma_reload(lan966x, max_mtu);

        /* Enable back the CPU port */
        lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(1),
                QSYS_SW_PORT_MODE_PORT_ENA,
                lan966x, QSYS_SW_PORT_MODE(CPU_PORT));

        return err;
}

int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
        int max_mtu;

        max_mtu = lan966x_fdma_get_max_frame(lan966x);
        if (max_mtu == lan966x->rx.max_mtu)
                return 0;

        return __lan966x_fdma_reload(lan966x, max_mtu);
}

int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
        int max_mtu;

        max_mtu = lan966x_fdma_get_max_frame(lan966x);
        return __lan966x_fdma_reload(lan966x, max_mtu);
}

void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
        if (lan966x->fdma_ndev)
                return;

        lan966x->fdma_ndev = dev;
        netif_napi_add(dev, &lan966x->napi, lan966x_fdma_napi_poll);
        napi_enable(&lan966x->napi);
}

void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev)
{
        if (lan966x->fdma_ndev == dev) {
                netif_napi_del(&lan966x->napi);
                lan966x->fdma_ndev = NULL;
        }
}

int lan966x_fdma_init(struct lan966x *lan966x)
{
        int err;

        if (!lan966x->fdma)
                return 0;

        lan966x->rx.lan966x = lan966x;
        lan966x->rx.channel_id = FDMA_XTR_CHANNEL;
        lan966x->rx.max_mtu = lan966x_fdma_get_max_frame(lan966x);
        lan966x->tx.lan966x = lan966x;
        lan966x->tx.channel_id = FDMA_INJ_CHANNEL;
        lan966x->tx.last_in_use = -1;

        err = lan966x_fdma_rx_alloc(&lan966x->rx);
        if (err)
                return err;

        err = lan966x_fdma_tx_alloc(&lan966x->tx);
        if (err) {
                lan966x_fdma_rx_free(&lan966x->rx);
                return err;
        }

        lan966x_fdma_rx_start(&lan966x->rx);

        return 0;
}

void lan966x_fdma_deinit(struct lan966x *lan966x)
{
        if (!lan966x->fdma)
                return;

        lan966x_fdma_rx_disable(&lan966x->rx);
        lan966x_fdma_tx_disable(&lan966x->tx);

        napi_synchronize(&lan966x->napi);
        napi_disable(&lan966x->napi);

        lan966x_fdma_rx_free_pages(&lan966x->rx);
        lan966x_fdma_rx_free(&lan966x->rx);
        page_pool_destroy(lan966x->rx.page_pool);
        lan966x_fdma_tx_free(&lan966x->tx);
}