Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 982d8d12830e411a2f262da3dffbc407ab40ff64..f12e8a050edb52b26170cf5d129018dac0a65de9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -785,7 +785,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
                struct page *p;
                u8 *vaddr;
 
-               skb_frag_foreach_page(frag, frag->page_offset,
+               skb_frag_foreach_page(frag, skb_frag_off(frag),
                                      skb_frag_size(frag), p, p_off, p_len,
                                      copied) {
                        seg_len = min_t(int, p_len, len);
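
The hunks in this series replace direct frag->page_offset accesses with
named accessors.  A minimal sketch of what skb_frag_off() and
skb_frag_off_add() presumably reduce to, assuming skb_frag_t is being
converged onto the block layer's struct bio_vec (an assumption based on
the accessor naming, not something this diff itself confirms):

	/* Sketch only -- assumes skb_frag_t aliases struct bio_vec. */
	static inline unsigned int skb_frag_off(const skb_frag_t *frag)
	{
		return frag->bv_offset;
	}

	static inline void skb_frag_off_add(skb_frag_t *frag, int delta)
	{
		frag->bv_offset += delta;
	}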
@@ -1375,7 +1375,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
                struct page *p;
                u8 *vaddr;
 
-               skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
+               skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
                                      p, p_off, p_len, copied) {
                        u32 copy, done = 0;
                        vaddr = kmap_atomic(p);
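
skb_frag_foreach_page() exists because a fragment may span a compound
page: the macro walks the fragment one page at a time, yielding each
page, the offset into it, and the length of that step.  A hedged usage
sketch that copies one whole fragment into a linear buffer
(copy_frag_linear is a hypothetical helper, not a kernel API):

	static void copy_frag_linear(const skb_frag_t *frag, u8 *dst)
	{
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			/* Map and copy each sub-page segment in turn. */
			vaddr = kmap_atomic(p);
			memcpy(dst + copied, vaddr + p_off, p_len);
			kunmap_atomic(vaddr);
		}
	}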
@@ -2144,10 +2144,12 @@ pull_pages:
                        skb_frag_unref(skb, i);
                        eat -= size;
                } else {
-                       skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
+                       skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
+
+                       *frag = skb_shinfo(skb)->frags[i];
                        if (eat) {
-                               skb_shinfo(skb)->frags[k].page_offset += eat;
-                               skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
+                               skb_frag_off_add(frag, eat);
+                               skb_frag_size_sub(frag, eat);
                                if (!i)
                                        goto end;
                                eat = 0;
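
Trimming bytes off the front of a fragment means the offset bump and
the size shrink must stay paired, which the named accessors now make
explicit at the call site.  A sketch, with a hypothetical helper name:

	/* Hypothetical helper, sketch only: drop 'eat' bytes from the
	 * front of a fragment.  Offset and size move in lockstep so the
	 * fragment never describes bytes outside its original window.
	 */
	static inline void frag_trim_front(skb_frag_t *frag, int eat)
	{
		skb_frag_off_add(frag, eat);	/* data starts later... */
		skb_frag_size_sub(frag, eat);	/* ...and is shorter */
	}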
@@ -2219,7 +2221,7 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
                                copy = len;
 
                        skb_frag_foreach_page(f,
-                                             f->page_offset + offset - start,
+                                             skb_frag_off(f) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                memcpy(to + copied, vaddr + p_off, p_len);
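
This hunk and several below share the skb_frag_off(f) + offset - start
idiom: start is the skb-relative byte at which the fragment begins, so
offset - start is the caller's position inside the fragment, and adding
the frag offset translates that into a byte position within the
fragment's page.  For example, a fragment beginning at skb offset 1000
with a page offset of 64 serves a request for skb offset 1100 from
page byte 64 + 100 = 164.  As a sketch (frag_page_pos is a
hypothetical name, not a kernel helper):

	static inline u32 frag_page_pos(const skb_frag_t *f,
					int offset, int start)
	{
		return skb_frag_off(f) + offset - start;
	}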
@@ -2395,7 +2397,7 @@ static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
                const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
 
                if (__splice_segment(skb_frag_page(f),
-                                    f->page_offset, skb_frag_size(f),
+                                    skb_frag_off(f), skb_frag_size(f),
                                     offset, len, spd, false, sk, pipe))
                        return true;
        }
@@ -2485,20 +2487,20 @@ do_frag_list:
        for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
 
-               if (offset < frag->size)
+               if (offset < skb_frag_size(frag))
                        break;
 
-               offset -= frag->size;
+               offset -= skb_frag_size(frag);
        }
 
        for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
                skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
 
-               slen = min_t(size_t, len, frag->size - offset);
+               slen = min_t(size_t, len, skb_frag_size(frag) - offset);
 
                while (slen) {
-                       ret = kernel_sendpage_locked(sk, frag->page.p,
-                                                    frag->page_offset + offset,
+                       ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
+                                                    skb_frag_off(frag) + offset,
                                                     slen, MSG_DONTWAIT);
                        if (ret <= 0)
                                goto error;
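
The two loops above first skip whole fragments until offset falls
inside one, then send from that point, fragment by fragment.  A
condensed sketch of the skip step (find_frag_for_offset is a
hypothetical helper):

	/* Return the index of the fragment containing *offset bytes
	 * into the paged data, reducing *offset to the position within
	 * that fragment; -1 if offset lies past the paged data.
	 */
	static int find_frag_for_offset(const struct sk_buff *skb,
					size_t *offset)
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (*offset < skb_frag_size(frag))
				return i;
			*offset -= skb_frag_size(frag);
		}
		return -1;
	}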
@@ -2580,7 +2582,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
                                copy = len;
 
                        skb_frag_foreach_page(frag,
-                                             frag->page_offset + offset - start,
+                                             skb_frag_off(frag) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                memcpy(vaddr + p_off, from + copied, p_len);
@@ -2660,7 +2662,7 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
                                copy = len;
 
                        skb_frag_foreach_page(frag,
-                                             frag->page_offset + offset - start,
+                                             skb_frag_off(frag) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                csum2 = INDIRECT_CALL_1(ops->update,
@@ -2759,7 +2761,7 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
                                copy = len;
 
                        skb_frag_foreach_page(frag,
-                                             frag->page_offset + offset - start,
+                                             skb_frag_off(frag) + offset - start,
                                              copy, p, p_off, p_len, copied) {
                                vaddr = kmap_atomic(p);
                                csum2 = csum_partial_copy_nocheck(vaddr + p_off,
@@ -2975,11 +2977,15 @@ skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
        skb_zerocopy_clone(to, from, GFP_ATOMIC);
 
        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
+               int size;
+
                if (!len)
                        break;
                skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
-               skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
-               len -= skb_shinfo(to)->frags[j].size;
+               size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
+                                       len);
+               skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
+               len -= size;
                skb_frag_ref(to, j);
                j++;
        }
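
Fragment sizes are likewise read and written only through accessors
now.  Presumably these reduce to the bio_vec length field; a hedged
sketch under the same layout assumption as above:

	static inline unsigned int skb_frag_size(const skb_frag_t *frag)
	{
		return frag->bv_len;
	}

	static inline void skb_frag_size_set(skb_frag_t *frag,
					     unsigned int size)
	{
		frag->bv_len = size;
	}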
@@ -3230,7 +3236,7 @@ static inline void skb_split_no_header(struct sk_buff *skb,
                                 * 2. Split accurately. This is what we do.
                                 */
                                skb_frag_ref(skb, i);
-                               skb_shinfo(skb1)->frags[0].page_offset += len - pos;
+                               skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
                                skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
                                skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
                                skb_shinfo(skb)->nr_frags++;
@@ -3293,7 +3299,7 @@ static int skb_prepare_for_shift(struct sk_buff *skb)
 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 {
        int from, to, merge, todo;
-       struct skb_frag_struct *fragfrom, *fragto;
+       skb_frag_t *fragfrom, *fragto;
 
        BUG_ON(shiftlen > skb->len);
 
@@ -3312,7 +3318,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
         */
        if (!to ||
            !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
-                             fragfrom->page_offset)) {
+                             skb_frag_off(fragfrom))) {
                merge = -1;
        } else {
                merge = to - 1;
@@ -3329,7 +3335,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 
                        skb_frag_size_add(fragto, shiftlen);
                        skb_frag_size_sub(fragfrom, shiftlen);
-                       fragfrom->page_offset += shiftlen;
+                       skb_frag_off_add(fragfrom, shiftlen);
 
                        goto onlymerged;
                }
@@ -3360,11 +3366,11 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
 
                } else {
                        __skb_frag_ref(fragfrom);
-                       fragto->page = fragfrom->page;
-                       fragto->page_offset = fragfrom->page_offset;
+                       skb_frag_page_copy(fragto, fragfrom);
+                       skb_frag_off_copy(fragto, fragfrom);
                        skb_frag_size_set(fragto, todo);
 
-                       fragfrom->page_offset += todo;
+                       skb_frag_off_add(fragfrom, todo);
                        skb_frag_size_sub(fragfrom, todo);
                        todo = 0;
 
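Moving the page and offset between two fragments gets dedicated copy
helpers instead of open-coded field assignments.  Presumed one-liners,
again assuming the bio_vec layout:

	static inline void skb_frag_page_copy(skb_frag_t *fragto,
					      const skb_frag_t *fragfrom)
	{
		fragto->bv_page = fragfrom->bv_page;
	}

	static inline void skb_frag_off_copy(skb_frag_t *fragto,
					     const skb_frag_t *fragfrom)
	{
		fragto->bv_offset = fragfrom->bv_offset;
	}
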
@@ -3489,7 +3495,7 @@ next_skb:
                        if (!st->frag_data)
                                st->frag_data = kmap_atomic(skb_frag_page(frag));
 
-                       *data = (u8 *) st->frag_data + frag->page_offset +
+                       *data = (u8 *) st->frag_data + skb_frag_off(frag) +
                                (abs_offset - st->stepped_offset);
 
                        return block_limit - abs_offset;
@@ -3625,10 +3631,10 @@ static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
        struct page *page;
 
        page = virt_to_head_page(frag_skb->head);
-       head_frag.page.p = page;
-       head_frag.page_offset = frag_skb->data -
-               (unsigned char *)page_address(page);
-       head_frag.size = skb_headlen(frag_skb);
+       __skb_frag_set_page(&head_frag, page);
+       skb_frag_off_set(&head_frag, frag_skb->data -
+                        (unsigned char *)page_address(page));
+       skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
        return head_frag;
 }
 
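Building a fragment from scratch, as skb_head_frag_to_page_desc() does
for the skb head area, now also goes through setters.  Presumed
definitions, sketch only:

	static inline void __skb_frag_set_page(skb_frag_t *frag,
					       struct page *page)
	{
		/* Takes no page reference; that remains the caller's job. */
		frag->bv_page = page;
	}

	static inline void skb_frag_off_set(skb_frag_t *frag,
					    unsigned int offset)
	{
		frag->bv_offset = offset;
	}
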
@@ -3890,7 +3896,7 @@ normal:
                        size = skb_frag_size(nskb_frag);
 
                        if (pos < offset) {
-                               nskb_frag->page_offset += offset - pos;
+                               skb_frag_off_add(nskb_frag, offset - pos);
                                skb_frag_size_sub(nskb_frag, offset - pos);
                        }
 
@@ -4011,7 +4017,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
                        *--frag = *--frag2;
                } while (--i);
 
-               frag->page_offset += offset;
+               skb_frag_off_add(frag, offset);
                skb_frag_size_sub(frag, offset);
 
                /* all fragments truesize : remove (head size + sk_buff) */
@@ -4040,8 +4046,8 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
 
                pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
 
-               frag->page.p      = page;
-               frag->page_offset = first_offset;
+               __skb_frag_set_page(frag, page);
+               skb_frag_off_set(frag, first_offset);
                skb_frag_size_set(frag, first_size);
 
                memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
@@ -4057,7 +4063,7 @@ merge:
        if (offset > headlen) {
                unsigned int eat = offset - headlen;
 
-               skbinfo->frags[0].page_offset += eat;
+               skb_frag_off_add(&skbinfo->frags[0], eat);
                skb_frag_size_sub(&skbinfo->frags[0], eat);
                skb->data_len -= eat;
                skb->len -= eat;
@@ -4100,6 +4106,9 @@ static const u8 skb_ext_type_len[] = {
 #ifdef CONFIG_XFRM
        [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
 #endif
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+       [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
+#endif
 };
 
 static __always_inline unsigned int skb_ext_total_length(void)
@@ -4110,6 +4119,9 @@ static __always_inline unsigned int skb_ext_total_length(void)
 #endif
 #ifdef CONFIG_XFRM
                skb_ext_type_len[SKB_EXT_SEC_PATH] +
+#endif
+#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
+               skb_ext_type_len[TC_SKB_EXT] +
 #endif
                0;
 }
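
The two hunks above register a new skb extension, TC_SKB_EXT, so both
the per-type size table and skb_ext_total_length() account for it when
CONFIG_NET_TC_SKB_EXT is enabled.  A hedged sketch of how a tc
classifier might use it through the generic extension API (assuming
struct tc_skb_ext carries a chain index, and chain_index is a local in
the caller):

	struct tc_skb_ext *ext;

	/* Stash the tc chain the packet was classified on. */
	ext = skb_ext_add(skb, TC_SKB_EXT);
	if (!ext)
		return -ENOMEM;
	ext->chain = chain_index;

	/* ... on a later pass, resume from the recorded chain ... */
	ext = skb_ext_find(skb, TC_SKB_EXT);
	if (ext)
		chain_index = ext->chain;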
@@ -4182,7 +4194,7 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
                        if (copy > len)
                                copy = len;
                        sg_set_page(&sg[elt], skb_frag_page(frag), copy,
-                                       frag->page_offset+offset-start);
+                                   skb_frag_off(frag) + offset - start);
                        elt++;
                        if (!(len -= copy))
                                return elt;
@@ -5853,7 +5865,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
                                 *    where splitting is expensive.
                                 * 2. Split accurately. This is what we do.
                                 */
-                               shinfo->frags[0].page_offset += off - pos;
+                               skb_frag_off_add(&shinfo->frags[0], off - pos);
                                skb_frag_size_sub(&shinfo->frags[0], off - pos);
                        }
                        skb_frag_ref(skb, i);