net/core/skbuff.c changes: __napi_alloc_skb kmalloc fallback for small lengths, skb_seq_read per-page frag iteration (frag_off), skb_segment_list shared-skb unclone handling
[linux-2.6-block.git] / net / core / skbuff.c
index f62cae3f75d877adec1efbf9726c42515f3912e8..c1a6f262636a15c828100c8e80cd5021bebe89f7 100644 (file)
@@ -501,13 +501,17 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                                 gfp_t gfp_mask)
 {
-       struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+       struct napi_alloc_cache *nc;
        struct sk_buff *skb;
        void *data;
 
        len += NET_SKB_PAD + NET_IP_ALIGN;
 
-       if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
+       /* If requested length is either too small or too big,
+        * we use kmalloc() for skb->head allocation.
+        */
+       if (len <= SKB_WITH_OVERHEAD(1024) ||
+           len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
            (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
                skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
                if (!skb)
@@ -515,6 +519,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
                goto skb_success;
        }
 
+       nc = this_cpu_ptr(&napi_alloc_cache);
        len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
        len = SKB_DATA_ALIGN(len);
 
@@ -3442,6 +3447,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
        st->root_skb = st->cur_skb = skb;
        st->frag_idx = st->stepped_offset = 0;
        st->frag_data = NULL;
+       st->frag_off = 0;
 }
 EXPORT_SYMBOL(skb_prepare_seq_read);
 
@@ -3496,14 +3502,27 @@ next_skb:
                st->stepped_offset += skb_headlen(st->cur_skb);
 
        while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
+               unsigned int pg_idx, pg_off, pg_sz;
+
                frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
-               block_limit = skb_frag_size(frag) + st->stepped_offset;
 
+               pg_idx = 0;
+               pg_off = skb_frag_off(frag);
+               pg_sz = skb_frag_size(frag);
+
+               if (skb_frag_must_loop(skb_frag_page(frag))) {
+                       pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
+                       pg_off = offset_in_page(pg_off + st->frag_off);
+                       pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
+                                                   PAGE_SIZE - pg_off);
+               }
+
+               block_limit = pg_sz + st->stepped_offset;
                if (abs_offset < block_limit) {
                        if (!st->frag_data)
-                               st->frag_data = kmap_atomic(skb_frag_page(frag));
+                               st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);
 
-                       *data = (u8 *) st->frag_data + skb_frag_off(frag) +
+                       *data = (u8 *)st->frag_data + pg_off +
                                (abs_offset - st->stepped_offset);
 
                        return block_limit - abs_offset;
@@ -3514,8 +3533,12 @@ next_skb:
                        st->frag_data = NULL;
                }
 
-               st->frag_idx++;
-               st->stepped_offset += skb_frag_size(frag);
+               st->stepped_offset += pg_sz;
+               st->frag_off += pg_sz;
+               if (st->frag_off == skb_frag_size(frag)) {
+                       st->frag_off = 0;
+                       st->frag_idx++;
+               }
        }
 
        if (st->frag_data) {
@@ -3655,7 +3678,8 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
        unsigned int delta_truesize = 0;
        unsigned int delta_len = 0;
        struct sk_buff *tail = NULL;
-       struct sk_buff *nskb;
+       struct sk_buff *nskb, *tmp;
+       int err;
 
        skb_push(skb, -skb_network_offset(skb) + offset);
 
@@ -3665,11 +3689,28 @@ struct sk_buff *skb_segment_list(struct sk_buff *skb,
                nskb = list_skb;
                list_skb = list_skb->next;
 
+               err = 0;
+               if (skb_shared(nskb)) {
+                       tmp = skb_clone(nskb, GFP_ATOMIC);
+                       if (tmp) {
+                               consume_skb(nskb);
+                               nskb = tmp;
+                               err = skb_unclone(nskb, GFP_ATOMIC);
+                       } else {
+                               err = -ENOMEM;
+                       }
+               }
+
                if (!tail)
                        skb->next = nskb;
                else
                        tail->next = nskb;
 
+               if (unlikely(err)) {
+                       nskb->next = list_skb;
+                       goto err_linearize;
+               }
+
                tail = nskb;
 
                delta_len += nskb->len;