net: thunderx: Optimize RBDR descriptor handling
author: Sunil Goutham <sgoutham@cavium.com>
Tue, 2 May 2017 13:06:51 +0000 (18:36 +0530)
committer: David S. Miller <davem@davemloft.net>
Tue, 2 May 2017 19:41:20 +0000 (15:41 -0400)
A receive buffer's physical address or IOVA will not go beyond 49 bits
in any case, since that is the maximum address width supported by the
hardware. As per perf, updating bitfields (i.e. buf_addr:42) in the RBDR
descriptor entry consumes a lot of CPU cycles, hence the field is changed
to a plain 64-bit value, with the alignment requirements taken care of.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/cavium/thunder/q_struct.h

index 12f9709bb1808c6c343fec4730fe947aeb34e0b8..dfc85a169127f7ad08af3c9371f2363579805d86 100644 (file)
@@ -257,7 +257,7 @@ static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
                }
 
                desc = GET_RBDR_DESC(rbdr, idx);
-               desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
+               desc->buf_addr = (u64)rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
        }
 
        nicvf_get_page(nic);
@@ -286,7 +286,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        /* Release page references */
        while (head != tail) {
                desc = GET_RBDR_DESC(rbdr, head);
-               buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+               buf_addr = desc->buf_addr;
                phys_addr = nicvf_iova_to_phys(nic, buf_addr);
                dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
                                     DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
@@ -297,7 +297,7 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
        }
        /* Release buffer of tail desc */
        desc = GET_RBDR_DESC(rbdr, tail);
-       buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+       buf_addr = desc->buf_addr;
        phys_addr = nicvf_iova_to_phys(nic, buf_addr);
        dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
                             DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
@@ -364,7 +364,7 @@ refill:
                        break;
 
                desc = GET_RBDR_DESC(rbdr, tail);
-               desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
+               desc->buf_addr = (u64)rbuf & ~(NICVF_RCV_BUF_ALIGN_BYTES - 1);
                refill_rb_cnt--;
                new_rb++;
        }
index f36347237a5425c7baf48540ad3fa2fd555b882d..e47205aa87eabf7605df93fc465ffe1a1a24083b 100644 (file)
@@ -359,15 +359,7 @@ union cq_desc_t {
 };
 
 struct rbdr_entry_t {
-#if defined(__BIG_ENDIAN_BITFIELD)
-       u64   rsvd0:15;
-       u64   buf_addr:42;
-       u64   cache_align:7;
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
-       u64   cache_align:7;
-       u64   buf_addr:42;
-       u64   rsvd0:15;
-#endif
+       u64   buf_addr;
 };
 
 /* TCP reassembly context */