net: Use skb_frag_off accessors
author: Jonathan Lemon <jonathan.lemon@gmail.com>
Tue, 30 Jul 2019 14:40:33 +0000 (07:40 -0700)
committer: David S. Miller <davem@davemloft.net>
Tue, 30 Jul 2019 21:21:32 +0000 (14:21 -0700)
Use accessor functions for skb fragment's page_offset instead
of direct references, in preparation for bvec conversion.

Signed-off-by: Jonathan Lemon <jonathan.lemon@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
44 files changed:
drivers/atm/eni.c
drivers/hsi/clients/ssi_protocol.c
drivers/infiniband/hw/hfi1/vnic_sdma.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb3/sge.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/iavf/iavf_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sun/cassini.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunvnet_common.c
drivers/net/ethernet/ti/netcp_core.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/thunderbolt.c
drivers/net/usb/usbnet.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe_transport.c
drivers/scsi/qedf/qedf_main.c
drivers/staging/unisys/visornic/visornic_main.c
drivers/target/iscsi/cxgbit/cxgbit_target.c
net/appletalk/ddp.c
net/core/datagram.c
net/core/dev.c
net/core/pktgen.c
net/core/skbuff.c
net/ipv4/tcp.c
net/ipv4/tcp_output.c
net/kcm/kcmsock.c
net/tls/tls_device.c
net/tls/tls_device_fallback.c
net/xfrm/xfrm_ipcomp.c

index 79b718430cd1e9124621e1aee9708c5ef150973f..b23d1e4bad33b20653b213047d815f388dfd0d47 100644 (file)
@@ -1136,7 +1136,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
                        else
                                put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
                                    skb_frag_page(&skb_shinfo(skb)->frags[i]) +
-                                       skb_shinfo(skb)->frags[i].page_offset,
+                                       skb_frag_off(&skb_shinfo(skb)->frags[i]),
                                    skb_frag_size(&skb_shinfo(skb)->frags[i]));
        }
        if (skb->len & 3) {
index c9e3f928b93de07d23f67c973237d40f75f70f0f..0253e76f1df27bb5cbe7ff68b8f62117436a641d 100644 (file)
@@ -182,7 +182,7 @@ static void ssip_skb_to_msg(struct sk_buff *skb, struct hsi_msg *msg)
                BUG_ON(!sg);
                frag = &skb_shinfo(skb)->frags[i];
                sg_set_page(sg, skb_frag_page(frag), skb_frag_size(frag),
-                               frag->page_offset);
+                               skb_frag_off(frag));
        }
 }
 
index 05a140504a999440f0c4c9203f9fe02ae8ec76c7..7d90b900131ba23fe4337ebe0c5e2ca2d3aa7192 100644 (file)
@@ -108,7 +108,7 @@ static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
                ret = sdma_txadd_page(sde->dd,
                                      &tx->txreq,
                                      skb_frag_page(frag),
-                                     frag->page_offset,
+                                     skb_frag_off(frag),
                                      skb_frag_size(frag));
                if (unlikely(ret))
                        goto bail_txadd;
index 78fa777c87b1997c0f7ab8629801338923070aea..c332b47618160327966b493a7493e7006833d345 100644 (file)
@@ -293,7 +293,8 @@ int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                mapping[i + off] = ib_dma_map_page(ca,
                                                 skb_frag_page(frag),
-                                                frag->page_offset, skb_frag_size(frag),
+                                                skb_frag_off(frag),
+                                                skb_frag_size(frag),
                                                 DMA_TO_DEVICE);
                if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
                        goto partial_error;
index ac61c9352535b676604f8857e122b1d361e36076..c23fbb34f0e9453625be07530bb75bfcca354a18 100644 (file)
@@ -957,7 +957,7 @@ static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
 
        frag = &skb_shinfo(skb)->frags[0];
        skb_frag_size_sub(frag, payload);
-       frag->page_offset += payload;
+       skb_frag_off_add(frag, payload);
        skb->data_len -= payload;
        skb->tail += payload;
 
index c0266a87794c2121a6b203b72d636670ae2d4618..4ab57d33a87e435eb473e49fc82beaa4a9fc8d44 100644 (file)
@@ -1594,7 +1594,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
                size = skb_frag_size(frag);
                dma_addr = dma_map_page_attrs(&nic->pdev->dev,
                                              skb_frag_page(frag),
-                                             frag->page_offset, size,
+                                             skb_frag_off(frag), size,
                                              DMA_TO_DEVICE,
                                              DMA_ATTR_SKIP_CPU_SYNC);
                if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
index 310a232e00f0b987da5eb28d02c16f102092c80d..6dabbf1502c713fd197666dfd4d5975634c0044f 100644 (file)
@@ -2182,7 +2182,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
 
        rx_frag += nr_frags;
        __skb_frag_set_page(rx_frag, sd->pg_chunk.page);
-       rx_frag->page_offset = sd->pg_chunk.offset + offset;
+       skb_frag_off_set(rx_frag, sd->pg_chunk.offset + offset);
        skb_frag_size_set(rx_frag, len);
 
        skb->len += len;
index e00a94a038790f68f0a824e1f43cd7068faddb5a..1c9883019767aa2ae62f9677105263a96b3c9ce6 100644 (file)
@@ -2346,8 +2346,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
-               skb_shinfo(skb)->frags[0].page_offset =
-                                       page_info->page_offset + hdr_len;
+               skb_frag_off_set(&skb_shinfo(skb)->frags[0],
+                                page_info->page_offset + hdr_len);
                skb_frag_size_set(&skb_shinfo(skb)->frags[0],
                                  curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
@@ -2372,8 +2372,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
-                       skb_shinfo(skb)->frags[j].page_offset =
-                                                       page_info->page_offset;
+                       skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+                                        page_info->page_offset);
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
@@ -2454,8 +2454,8 @@ static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
-                       skb_shinfo(skb)->frags[j].page_offset =
-                                                       page_info->page_offset;
+                       skb_frag_off_set(&skb_shinfo(skb)->frags[j],
+                                        page_info->page_offset);
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
index 5fad73b2e12386763f3ef9797b00131956a5847f..3981c06f082f1ab157e0f7e255d6e4324b6c5334 100644 (file)
@@ -501,7 +501,7 @@ fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                nr_frags = skb_shinfo(skb)->nr_frags;
                frag = skb_shinfo(skb)->frags;
                for (i = 0; i < nr_frags; i++, frag++) {
-                       if (!IS_ALIGNED(frag->page_offset, 4)) {
+                       if (!IS_ALIGNED(skb_frag_off(frag), 4)) {
                                is_aligned = 0;
                                break;
                        }
index 3da6800732656477bd5d932373dde23d1b4dde9c..81a05ea38237e85903e19ce209b95e5d5298ac9c 100644 (file)
@@ -1485,7 +1485,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
 
                        memcpy(dst + cur,
                               page_address(skb_frag_page(frag)) +
-                              frag->page_offset, skb_frag_size(frag));
+                              skb_frag_off(frag), skb_frag_size(frag));
                        cur += skb_frag_size(frag);
                }
        } else {
index f162252f01b5040eb65995c256b3500a5553d293..e3f29dc8b290a4ead38bcffe62cdb947bdfcf48f 100644 (file)
@@ -3306,7 +3306,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
                 * descriptor associated with the fragment.
                 */
                if (stale_size > I40E_MAX_DATA_PER_TXD) {
-                       int align_pad = -(stale->page_offset) &
+                       int align_pad = -(skb_frag_off(stale)) &
                                        (I40E_MAX_READ_REQ_SIZE - 1);
 
                        sum -= align_pad;
index fae7cd1c618a50d5dff8b8c657d7ed211aa9ac7c..7a30d5d5ef53aa190dab8a4c8c3f8676d713221f 100644 (file)
@@ -2205,7 +2205,7 @@ bool __iavf_chk_linearize(struct sk_buff *skb)
                 * descriptor associated with the fragment.
                 */
                if (stale_size > IAVF_MAX_DATA_PER_TXD) {
-                       int align_pad = -(stale->page_offset) &
+                       int align_pad = -(skb_frag_off(stale)) &
                                        (IAVF_MAX_READ_REQ_SIZE - 1);
 
                        sum -= align_pad;
index e12d23d1fa64a86aff4ccff885b975ce1b391903..dc7b128c780e82b2ac26f0f20720c32923915bec 100644 (file)
@@ -1807,7 +1807,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring,
 
        /* update all of the pointers */
        skb_frag_size_sub(frag, pull_len);
-       frag->page_offset += pull_len;
+       skb_frag_off_add(frag, pull_len);
        skb->data_len -= pull_len;
        skb->tail += pull_len;
 }
@@ -1844,7 +1844,7 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring,
 
                dma_sync_single_range_for_cpu(rx_ring->dev,
                                              IXGBE_CB(skb)->dma,
-                                             frag->page_offset,
+                                             skb_frag_off(frag),
                                              skb_frag_size(frag),
                                              DMA_FROM_DEVICE);
        }
index 9c3ab00643bdfb9b6824dc906d75a36df796c897..6d52cf5ce20ec646d633e6c34e70a833fd52a6ca 100644 (file)
@@ -2040,8 +2040,8 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
                ctxbi = txbi + ((idx + i + 2) & (mask));
 
                ret = jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi,
-                               skb_frag_page(frag),
-                               frag->page_offset, skb_frag_size(frag), hidma);
+                                     skb_frag_page(frag), skb_frag_off(frag),
+                                     skb_frag_size(frag), hidma);
                if (ret) {
                        jme_drop_tx_map(jme, idx, i);
                        goto out;
index 88ea5ac83c93f6f7ae80b28cd5480b8a8f68025e..82ea55ae5053da4e7124a3769e04cb27874fdcb9 100644 (file)
@@ -659,7 +659,7 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
        for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
                const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];
 
-               if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7)
+               if (skb_frag_size(fragp) <= 8 && skb_frag_off(fragp) & 7)
                        return 1;
        }
 
index 9ead6ecb7586dfcf780da518d26fa4162ba52103..99eaadba555fb34635ec42a8395722bf96a5c9ae 100644 (file)
@@ -1306,8 +1306,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
                skb->len -= VLAN_HLEN;
                skb->data_len -= VLAN_HLEN;
                frag = skb_shinfo(skb)->frags;
-               frag->page_offset += VLAN_HLEN;
-               skb_frag_size_set(frag, skb_frag_size(frag) - VLAN_HLEN);
+               skb_frag_off_add(frag, VLAN_HLEN);
+               skb_frag_size_sub(frag, VLAN_HLEN);
        }
 }
 
@@ -1364,7 +1364,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
        }
 
        /* remove padding */
-       rx_frags[0].page_offset += MXGEFW_PAD;
+       skb_frag_off_add(&rx_frags[0], MXGEFW_PAD);
        skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD);
        len -= MXGEFW_PAD;
 
index 31ec56091a5d68c06b8bddb0d5e4a15bf2e189fe..65e81ec1b3140422969f41ab80bc443067174377 100644 (file)
@@ -274,7 +274,7 @@ static void efx_skb_copy_bits_to_pio(struct efx_nic *efx, struct sk_buff *skb,
 
                vaddr = kmap_atomic(skb_frag_page(f));
 
-               efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + f->page_offset,
+               efx_memcpy_toio_aligned_cb(efx, piobuf, vaddr + skb_frag_off(f),
                                           skb_frag_size(f), copy_buf);
                kunmap_atomic(vaddr);
        }
index 6fc05c106afc6fcef6e8f8855cdcbc588c33fea1..c91876f8c536abc88e47e99c47100938e99e396f 100644 (file)
@@ -2034,7 +2034,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
                __skb_frag_set_page(frag, page->buffer);
                __skb_frag_ref(frag);
-               frag->page_offset = off;
+               skb_frag_off_set(frag, off);
                skb_frag_size_set(frag, hlen - swivel);
 
                /* any more data? */
@@ -2058,7 +2058,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
                        __skb_frag_set_page(frag, page->buffer);
                        __skb_frag_ref(frag);
-                       frag->page_offset = 0;
+                       skb_frag_off_set(frag, 0);
                        skb_frag_size_set(frag, hlen);
                        RX_USED_ADD(page, hlen + cp->crc_size);
                }
@@ -2816,7 +2816,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
                mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
                                           DMA_TO_DEVICE);
 
-               tabort = cas_calc_tabort(cp, fragp->page_offset, len);
+               tabort = cas_calc_tabort(cp, skb_frag_off(fragp), len);
                if (unlikely(tabort)) {
                        void *addr;
 
@@ -2827,7 +2827,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 
                        addr = cas_page_map(skb_frag_page(fragp));
                        memcpy(tx_tiny_buf(cp, ring, entry),
-                              addr + fragp->page_offset + len - tabort,
+                              addr + skb_frag_off(fragp) + len - tabort,
                               tabort);
                        cas_page_unmap(addr);
                        mapping = tx_tiny_map(cp, ring, entry, tentry);
index 0bc5863bffeb41b2bab9519b45bf73ce2d17c12e..f5fd1f3c07cc5c3914c0bbb86f9a59549c172a13 100644 (file)
@@ -6695,7 +6695,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
 
                len = skb_frag_size(frag);
                mapping = np->ops->map_page(np->device, skb_frag_page(frag),
-                                           frag->page_offset, len,
+                                           skb_frag_off(frag), len,
                                            DMA_TO_DEVICE);
 
                rp->tx_buffs[prod].skb = NULL;
index baa3088b475c758f3cbcf17bdf7cb0e222ba2caf..646e67236b65ca29687c69c500008e11ed9145d9 100644 (file)
@@ -1088,7 +1088,7 @@ static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
                        vaddr = kmap_atomic(skb_frag_page(f));
                        blen = skb_frag_size(f);
                        blen += 8 - (blen & 7);
-                       err = ldc_map_single(lp, vaddr + f->page_offset,
+                       err = ldc_map_single(lp, vaddr + skb_frag_off(f),
                                             blen, cookies + nc, ncookies - nc,
                                             map_perm);
                        kunmap_atomic(vaddr);
@@ -1124,7 +1124,7 @@ static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 
-               docopy |= f->page_offset & 7;
+               docopy |= skb_frag_off(f) & 7;
        }
        if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
            skb_tailroom(skb) < pad ||
index 642843945031c222a5f5225832c83df11aa87145..1b2702f7445520234187ec66230fe1dacd0d2a26 100644 (file)
@@ -1116,7 +1116,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                struct page *page = skb_frag_page(frag);
-               u32 page_offset = frag->page_offset;
+               u32 page_offset = skb_frag_off(frag);
                u32 buf_len = skb_frag_size(frag);
                dma_addr_t desc_dma;
                u32 desc_dma_32;
index 3544e19915792df3e162b83983bdbf4647a42ab7..86884c8630130ad9acc41f5cedd1df1eea1a0a1e 100644 (file)
@@ -435,7 +435,7 @@ static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
 
                slots_used += fill_pg_buf(skb_frag_page(frag),
-                                       frag->page_offset,
+                                       skb_frag_off(frag),
                                        skb_frag_size(frag), &pb[slots_used]);
        }
        return slots_used;
@@ -449,7 +449,7 @@ static int count_skb_frag_slots(struct sk_buff *skb)
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
-               unsigned long offset = frag->page_offset;
+               unsigned long offset = skb_frag_off(frag);
 
                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;
index fcf31335a8b62f6c3dcf31b1270b0b8d1c6cdeb7..dacb4f680fd48b41d4dc430d95744edde794d240 100644 (file)
@@ -1005,7 +1005,7 @@ static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
        const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];
 
        *len = skb_frag_size(frag);
-       return kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+       return kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 }
 
 static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
index ace7ffaf391307baa46e1cb81ceb1ecd2be2f25d..58952a79b05fb3122aace9452833759774d14835 100644 (file)
@@ -1328,7 +1328,7 @@ static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
 
                total_len += skb_frag_size(f);
                sg_set_page(&urb->sg[i + s], skb_frag_page(f), skb_frag_size(f),
-                               f->page_offset);
+                           skb_frag_off(f));
        }
        urb->transfer_buffer_length = total_len;
 
index 03feaeae89cd3a92d5b940db58d898bed5c20bf2..216acf37ca7c56dbcecde22b73bcc85f08805181 100644 (file)
@@ -662,7 +662,7 @@ vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
        BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
        __skb_frag_set_page(frag, rbi->page);
-       frag->page_offset = 0;
+       skb_frag_off_set(frag, 0);
        skb_frag_size_set(frag, rcd->len);
        skb->data_len += rcd->len;
        skb->truesize += PAGE_SIZE;
index a96c5c2a2c5af5eb05004c731b55f20b7575fb40..3ef07b63613e8af08a5a611cf5edafdc9ba07243 100644 (file)
@@ -136,12 +136,12 @@ static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
 
 static u16 frag_get_pending_idx(skb_frag_t *frag)
 {
-       return (u16)frag->page_offset;
+       return (u16)skb_frag_off(frag);
 }
 
 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
 {
-       frag->page_offset = pending_idx;
+       skb_frag_off_set(frag, pending_idx);
 }
 
 static inline pending_ring_idx_t pending_index(unsigned i)
@@ -1068,7 +1068,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
 
                offset += len;
                __skb_frag_set_page(&frags[i], page);
-               frags[i].page_offset = 0;
+               skb_frag_off_set(&frags[i], 0);
                skb_frag_size_set(&frags[i], len);
        }
 
index 8d33970a2950ea824831426cc4ef935dd06e6a6a..b930d5f9522234c98116f2ea20b2c366f754a813 100644 (file)
@@ -531,7 +531,7 @@ static int xennet_count_skb_slots(struct sk_buff *skb)
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
                unsigned long size = skb_frag_size(frag);
-               unsigned long offset = frag->page_offset;
+               unsigned long offset = skb_frag_off(frag);
 
                /* Skip unused frames from start of page */
                offset &= ~PAGE_MASK;
@@ -674,8 +674,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
        /* Requests for all the frags. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-               tx = xennet_make_txreqs(queue, tx, skb,
-                                       skb_frag_page(frag), frag->page_offset,
+               tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+                                       skb_frag_off(frag),
                                        skb_frag_size(frag));
        }
 
@@ -1040,7 +1040,7 @@ err:
                if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
                        NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
 
-               skb_shinfo(skb)->frags[0].page_offset = rx->offset;
+               skb_frag_off_set(&skb_shinfo(skb)->frags[0], rx->offset);
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
                skb->data_len = rx->status;
                skb->len += rx->status;
index 7796799bf04a376aff8f46e82c1278b7fdf5ebf7..9ff9429395eb59335ca53f73de3267b5cc847bf4 100644 (file)
@@ -346,7 +346,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-               cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+               cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
        } else {
                cp = skb_put(skb, tlen);
        }
index 00dd47bcbb1e0fa81e104cd5850277530461f60b..587d4bbb7d226e77dbc4463f1a56de10e513dcaf 100644 (file)
@@ -1522,8 +1522,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-               cp = kmap_atomic(skb_frag_page(frag))
-                       + frag->page_offset;
+               cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
        } else {
                cp = skb_put(skb, tlen);
        }
index d0550384cc38df4a6d64aad87d41fc7cde33b39a..a20ddc301c89e76dc0569118c1d06294b84f3940 100644 (file)
@@ -318,7 +318,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
-               off = frag->page_offset;
+               off = skb_frag_off(frag);
                len = skb_frag_size(frag);
                while (len > 0) {
                        clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
index a42babde036d702228503e2d8a488b816dc3def4..42542720962f054dd6c22315b8e99a02aa48d8f9 100644 (file)
@@ -1077,7 +1077,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp)
                        return -ENOMEM;
                }
                frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-               cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+               cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
        } else {
                cp = skb_put(skb, tlen);
        }
index b889b04a6e25e2538cda4fe647b8e2513b6ded54..6fa7726185deef212c345b6055e49b943c70937d 100644 (file)
@@ -284,7 +284,7 @@ static int visor_copy_fragsinfo_from_skb(struct sk_buff *skb,
                for (frag = 0; frag < numfrags; frag++) {
                        count = add_physinfo_entries(page_to_pfn(
                                  skb_frag_page(&skb_shinfo(skb)->frags[frag])),
-                                 skb_shinfo(skb)->frags[frag].page_offset,
+                                 skb_frag_off(&skb_shinfo(skb)->frags[frag]),
                                  skb_frag_size(&skb_shinfo(skb)->frags[frag]),
                                  count, frags_max, frags);
                        /* add_physinfo_entries only returns
index c25315431ad006a320b5666e3e9b6ac81015f892..fcdc4211e3c27f2896d0782a9b2693dbeaf9f5d6 100644 (file)
@@ -900,7 +900,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 
                sg_init_table(&ccmd->sg, 1);
                sg_set_page(&ccmd->sg, skb_frag_page(dfrag),
-                               skb_frag_size(dfrag), dfrag->page_offset);
+                               skb_frag_size(dfrag), skb_frag_off(dfrag));
                get_page(skb_frag_page(dfrag));
 
                cmd->se_cmd.t_data_sg = &ccmd->sg;
@@ -1403,7 +1403,7 @@ static void cxgbit_lro_skb_dump(struct sk_buff *skb)
                        pdu_cb->ddigest, pdu_cb->frags);
        for (i = 0; i < ssi->nr_frags; i++)
                pr_info("skb 0x%p, frag %d, off %u, sz %u.\n",
-                       skb, i, ssi->frags[i].page_offset,
+                       skb, i, skb_frag_off(&ssi->frags[i]),
                        skb_frag_size(&ssi->frags[i]));
 }
 
index a8cb6b2e20c15b2183739ce06e51d35d4c0f823f..4072e9d394d61ddf9bb7afc1f49205967277307c 100644 (file)
@@ -953,8 +953,8 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
                        if (copy > len)
                                copy = len;
                        vaddr = kmap_atomic(skb_frag_page(frag));
-                       sum = atalk_sum_partial(vaddr + frag->page_offset +
-                                                 offset - start, copy, sum);
+                       sum = atalk_sum_partial(vaddr + skb_frag_off(frag) +
+                                               offset - start, copy, sum);
                        kunmap_atomic(vaddr);
 
                        if (!(len -= copy))
index 45a162ef5e02570189b006492ad8cd65c33c1990..4cc8dc5db2b73471ae3a15fda753912d5e869624 100644 (file)
@@ -442,8 +442,8 @@ static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
 
                        if (copy > len)
                                copy = len;
-                       n = cb(vaddr + frag->page_offset +
-                               offset - start, copy, data, to);
+                       n = cb(vaddr + skb_frag_off(frag) + offset - start,
+                              copy, data, to);
                        kunmap(page);
                        offset += n;
                        if (n != copy)
@@ -573,7 +573,7 @@ int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
                        if (copy > len)
                                copy = len;
                        copied = copy_page_from_iter(skb_frag_page(frag),
-                                         frag->page_offset + offset - start,
+                                         skb_frag_off(frag) + offset - start,
                                          copy, from);
                        if (copied != copy)
                                goto fault;
index fc676b2610e3c1e7e236b62d4984647bc0e71916..e2a11c62197b3ef1ea09f9897808e4985b6f2641 100644 (file)
@@ -5481,7 +5481,7 @@ static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
        skb->data_len -= grow;
        skb->tail += grow;
 
-       pinfo->frags[0].page_offset += grow;
+       skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);
 
        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
index bb9915291644e26720c5bc82c8e02210ef1fbaa5..c5dbdc87342a88bb8e1d56fdc7d34f05895fad81 100644 (file)
@@ -2652,7 +2652,7 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
                        }
                        get_page(pkt_dev->page);
                        skb_frag_set_page(skb, i, pkt_dev->page);
-                       skb_shinfo(skb)->frags[i].page_offset = 0;
+                       skb_frag_off_set(&skb_shinfo(skb)->frags[i], 0);
                        /*last fragment, fill rest of data*/
                        if (i == (frags - 1))
                                skb_frag_size_set(&skb_shinfo(skb)->frags[i],
index 0b788df5a75b8abe33dfe425be9a20cb8ad04c11..ea8e8d332d85064e241bc716231991cb07a8dc40 100644 (file)
@@ -785,7 +785,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
                struct page *p;
                u8 *vaddr;
 
-               skb_frag_foreach_page(frag, frag->page_offset,
+               skb_frag_foreach_page(frag, skb_frag_off(frag),
                                      skb_frag_size(frag), p, p_off, p_len,
                                      copied) {
                        seg_len = min_t(int, p_len, len);
@@ -1375,7 +1375,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
                struct page *p;
                u8 *vaddr;
 
-               skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
+               skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
                                      p, p_off, p_len, copied) {
                        u32 copy, done = 0;
                        vaddr = kmap_atomic(p);
@@ -2144,10 +2144,12 @@ pull_pages:
                        skb_frag_unref(skb, i);
                        eat -= size;
                } else {
-                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+                       *frag = skb_shinfo(skb)->frags[i];
                        if (eat) {
-                               skb_shinfo(skb)->frags[k].page_offset += eat;
-                               skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
+                               skb_frag_off_add(frag, eat);
+                               skb_frag_size_sub(frag, eat);
                                if (!i)
                                        goto end;
                                eat = 0;
@@ -2219,7 +2221,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
                                copy = len;
 
                        skb_frag_foreach_page(f,
-                                             f->page_offset + offset - start,
+                                             skb_frag_off(f) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                memcpy(to + copied, vaddr + p_off, p_len);
@@ -2395,7 +2397,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
                if (__splice_segment(skb_frag_page(f),
-                                    f->page_offset, skb_frag_size(f),
+                                    skb_frag_off(f), skb_frag_size(f),
                                     offset, len, spd, false, sk, pipe))
                        return true;
        }
@@ -2498,7 +2500,7 @@ do_frag_list:
 
                while (slen) {
                        ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
-                                                    frag->page_offset + offset,
+                                                    skb_frag_off(frag) + offset,
                                                     slen, MSG_DONTWAIT);
                        if (ret <= 0)
                                goto error;
@@ -2580,7 +2582,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
                                copy = len;
 
                        skb_frag_foreach_page(frag,
-                                             frag->page_offset + offset - start,
+                                             skb_frag_off(frag) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                memcpy(vaddr + p_off, from + copied, p_len);
@@ -2660,7 +2662,7 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
                                copy = len;
 
                        skb_frag_foreach_page(frag,
-                                             frag->page_offset + offset - start,
+                                             skb_frag_off(frag) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                csum2 = INDIRECT_CALL_1(ops->update,
@@ -2759,7 +2761,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                copy = len;
 
                        skb_frag_foreach_page(frag,
-                                             frag->page_offset + offset - start,
+                                             skb_frag_off(frag) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                csum2 = csum_partial_copy_nocheck(vaddr + p_off,
@@ -3234,7 +3236,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
                                 * 2. Split is accurately. We make this.
                                 */
                                skb_frag_ref(skb, i);
-                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+                               skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
                                skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
                                skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
                                skb_shinfo(skb)->nr_frags++;
@@ -3316,7 +3318,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
         */
        if (!to ||
            !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
-                             fragfrom->page_offset)) {
+                             skb_frag_off(fragfrom))) {
                merge = -1;
        } else {
                merge = to - 1;
@@ -3333,7 +3335,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 
                        skb_frag_size_add(fragto, shiftlen);
                        skb_frag_size_sub(fragfrom, shiftlen);
-                       fragfrom->page_offset += shiftlen;
+                       skb_frag_off_add(fragfrom, shiftlen);
 
                        goto onlymerged;
                }
@@ -3364,11 +3366,11 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 
                } else {
                        __skb_frag_ref(fragfrom);
-                       fragto->bv_page = fragfrom->bv_page;
-                       fragto->page_offset = fragfrom->page_offset;
+                       skb_frag_page_copy(fragto, fragfrom);
+                       skb_frag_off_copy(fragto, fragfrom);
                        skb_frag_size_set(fragto, todo);
 
-                       fragfrom->page_offset += todo;
+                       skb_frag_off_add(fragfrom, todo);
                        skb_frag_size_sub(fragfrom, todo);
                        todo = 0;
 
@@ -3493,7 +3495,7 @@ next_skb:
                        if (!st->frag_data)
                                st->frag_data = kmap_atomic(skb_frag_page(frag));
 
-                       *data = (u8 *) st->frag_data + frag->page_offset +
+                       *data = (u8 *) st->frag_data + skb_frag_off(frag) +
                                (abs_offset - st->stepped_offset);
 
                        return block_limit - abs_offset;
@@ -3630,8 +3632,8 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
 
        page = virt_to_head_page(frag_skb->head);
        __skb_frag_set_page(&head_frag, page);
-       head_frag.page_offset = frag_skb->data -
-               (unsigned char *)page_address(page);
+       skb_frag_off_set(&head_frag, frag_skb->data -
+                        (unsigned char *)page_address(page));
        skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
        return head_frag;
 }
@@ -3875,7 +3877,7 @@ normal:
                        size = skb_frag_size(nskb_frag);
 
                        if (pos < offset) {
-                               nskb_frag->page_offset += offset - pos;
+                               skb_frag_off_add(nskb_frag, offset - pos);
                                skb_frag_size_sub(nskb_frag, offset - pos);
                        }
 
@@ -3996,7 +3998,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
                        *--frag = *--frag2;
                } while (--i);
 
-               frag->page_offset += offset;
+               skb_frag_off_add(frag, offset);
                skb_frag_size_sub(frag, offset);
 
                /* all fragments truesize : remove (head size + sk_buff) */
@@ -4026,7 +4028,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
 
                __skb_frag_set_page(frag, page);
-               frag->page_offset = first_offset;
+               skb_frag_off_set(frag, first_offset);
                skb_frag_size_set(frag, first_size);
 
                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
@@ -4042,7 +4044,7 @@ merge:
        if (offset > headlen) {
                unsigned int eat = offset - headlen;
 
-               skbinfo->frags[0].page_offset += eat;
+               skb_frag_off_add(&skbinfo->frags[0], eat);
                skb_frag_size_sub(&skbinfo->frags[0], eat);
                skb->data_len -= eat;
                skb->len -= eat;
@@ -4167,7 +4169,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
                        if (copy > len)
                                copy = len;
                        sg_set_page(&sg[elt], skb_frag_page(frag), copy,
-                                       frag->page_offset+offset-start);
+                                   skb_frag_off(frag) + offset - start);
                        elt++;
                        if (!(len -= copy))
                                return elt;
@@ -5838,7 +5840,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
                                 *    where splitting is expensive.
                                 * 2. Split is accurately. We make this.
                                 */
-                               shinfo->frags[0].page_offset += off - pos;
+                               skb_frag_off_add(&shinfo->frags[0], off - pos);
                                skb_frag_size_sub(&shinfo->frags[0], off - pos);
                        }
                        skb_frag_ref(skb, i);
index f62f0e7e3cdd370233faaef6d81c6415ade0f5b6..a0a66321c0ee99918b2080219dbaefcf3c398e13 100644 (file)
@@ -1782,12 +1782,12 @@ static int tcp_zerocopy_receive(struct sock *sk,
                                frags++;
                        }
                }
-               if (skb_frag_size(frags) != PAGE_SIZE || frags->page_offset) {
+               if (skb_frag_size(frags) != PAGE_SIZE || skb_frag_off(frags)) {
                        int remaining = zc->recv_skip_hint;
                        int size = skb_frag_size(frags);
 
                        while (remaining && (size != PAGE_SIZE ||
-                                            frags->page_offset)) {
+                                            skb_frag_off(frags))) {
                                remaining -= size;
                                frags++;
                                size = skb_frag_size(frags);
@@ -3784,7 +3784,7 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
 
        for (i = 0; i < shi->nr_frags; ++i) {
                const skb_frag_t *f = &shi->frags[i];
-               unsigned int offset = f->page_offset;
+               unsigned int offset = skb_frag_off(f);
                struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
 
                sg_set_page(&sg, page, skb_frag_size(f),
index 6e4afc48d7bba7cded4d3fe38f32ab02328f9e05..e6d02e05bb1c9fae3ac05ce0ab70a8308e800589 100644 (file)
@@ -1402,7 +1402,7 @@ static int __pskb_trim_head(struct sk_buff *skb, int len)
                } else {
                        shinfo->frags[k] = shinfo->frags[i];
                        if (eat) {
-                               shinfo->frags[k].page_offset += eat;
+                               skb_frag_off_add(&shinfo->frags[k], eat);
                                skb_frag_size_sub(&shinfo->frags[k], eat);
                                eat = 0;
                        }
index 05f63c4300e973379789c6be10186198a152c2e5..4ff75c3a8d6ea57559ec92c2e6a9b3761a441b70 100644 (file)
@@ -642,7 +642,7 @@ do_frag:
 
                        ret = kernel_sendpage(psock->sk->sk_socket,
                                              skb_frag_page(frag),
-                                             frag->page_offset + frag_offset,
+                                             skb_frag_off(frag) + frag_offset,
                                              skb_frag_size(frag) - frag_offset,
                                              MSG_DONTWAIT);
                        if (ret <= 0) {
index 4ec8a06fa5d1fc94b0ee6910c4c258758f8ef0be..d184230665eb649305f39f7a08779503cc57e585 100644 (file)
@@ -244,12 +244,12 @@ static void tls_append_frag(struct tls_record_info *record,
 
        frag = &record->frags[record->num_frags - 1];
        if (skb_frag_page(frag) == pfrag->page &&
-           frag->page_offset + skb_frag_size(frag) == pfrag->offset) {
+           skb_frag_off(frag) + skb_frag_size(frag) == pfrag->offset) {
                skb_frag_size_add(frag, size);
        } else {
                ++frag;
                __skb_frag_set_page(frag, pfrag->page);
-               frag->page_offset = pfrag->offset;
+               skb_frag_off_set(frag, pfrag->offset);
                skb_frag_size_set(frag, size);
                ++record->num_frags;
                get_page(pfrag->page);
@@ -301,7 +301,7 @@ static int tls_push_record(struct sock *sk,
                frag = &record->frags[i];
                sg_unmark_end(&offload_ctx->sg_tx_data[i]);
                sg_set_page(&offload_ctx->sg_tx_data[i], skb_frag_page(frag),
-                           skb_frag_size(frag), frag->page_offset);
+                           skb_frag_size(frag), skb_frag_off(frag));
                sk_mem_charge(sk, skb_frag_size(frag));
                get_page(skb_frag_page(frag));
        }
@@ -324,7 +324,7 @@ static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx,
 
        frag = &record->frags[0];
        __skb_frag_set_page(frag, pfrag->page);
-       frag->page_offset = pfrag->offset;
+       skb_frag_off_set(frag, pfrag->offset);
        skb_frag_size_set(frag, prepend_size);
 
        get_page(pfrag->page);
index 9070d68a92a4bf8a8efa0ff12bdc85cd66b71b51..28895333701e40f68b02cb62b4ad9aa845a05537 100644 (file)
@@ -273,7 +273,7 @@ static int fill_sg_in(struct scatterlist *sg_in,
 
                __skb_frag_ref(frag);
                sg_set_page(sg_in + i, skb_frag_page(frag),
-                           skb_frag_size(frag), frag->page_offset);
+                           skb_frag_size(frag), skb_frag_off(frag));
 
                remaining -= skb_frag_size(frag);
 
index 32c364d3bfb3f35781dece418fab395e12b84a3a..4d422447aadc363c2551d98c51532a2f5270c9d6 100644 (file)
@@ -85,7 +85,7 @@ static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
                if (dlen < len)
                        len = dlen;
 
-               frag->page_offset = 0;
+               skb_frag_off_set(frag, 0);
                skb_frag_size_set(frag, len);
                memcpy(skb_frag_address(frag), scratch, len);