bpf: sk_msg, improve offset chk in _is_valid_access
net/core/filter.c
index 9a1327eb25faf2c73a37d253f88b19c6b92414d6..6bd9f08f6162fcf7d1e039eefbc664f116856001 100644
@@ -296,22 +296,18 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
                break;
 
        case SKF_AD_VLAN_TAG:
-       case SKF_AD_VLAN_TAG_PRESENT:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
-               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
 
                /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, vlan_tci));
-               if (skb_field == SKF_AD_VLAN_TAG) {
-                       *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
-                                               ~VLAN_TAG_PRESENT);
-               } else {
-                       /* dst_reg >>= 12 */
-                       *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
-                       /* dst_reg &= 1 */
+               break;
+       case SKF_AD_VLAN_TAG_PRESENT:
+               *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
+               if (PKT_VLAN_PRESENT_BIT)
+                       *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
+               if (PKT_VLAN_PRESENT_BIT < 7)
                        *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
-               }
                break;
        }
 
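The sequence now emitted for SKF_AD_VLAN_TAG_PRESENT (and reused in bpf_convert_ctx_access() below) can be modeled in plain C as a sketch; byte_off and bit stand in for PKT_VLAN_PRESENT_OFFSET() and PKT_VLAN_PRESENT_BIT, and the two conditionals mirror the instructions that are elided when the flag's bit position makes them no-ops:

/* minimal model of the emitted byte load + shift + mask */
static unsigned int vlan_present_model(const unsigned char *skb_base,
                                       unsigned int byte_off,
                                       unsigned int bit)
{
        unsigned int v = skb_base[byte_off];    /* BPF_LDX_MEM(BPF_B, ..) */

        if (bit)                /* BPF_RSH skipped when the flag is bit 0 */
                v >>= bit;
        if (bit < 7)            /* BPF_AND skipped when the flag is bit 7 */
                v &= 1;
        return v;
}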
@@ -467,7 +463,8 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
                bool ldx_off_ok = offset <= S16_MAX;
 
                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
-               *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
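+               /* subtracting 0 is a no-op, so emit BPF_SUB only when needed */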
+               if (offset)
+                       *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
                *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
                                      size, 2 + endian + (!ldx_off_ok * 2));
                if (ldx_off_ok) {
@@ -2428,6 +2425,174 @@ static const struct bpf_func_proto bpf_msg_push_data_proto = {
        .arg4_type      = ARG_ANYTHING,
 };
 
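+/* Drop the scatterlist element at index i: copy each following element
+ * one slot to the left and pull sg.end back by one.
+ */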
+static void sk_msg_shift_left(struct sk_msg *msg, int i)
+{
+       int prev;
+
+       do {
+               prev = i;
+               sk_msg_iter_var_next(i);
+               msg->sg.data[prev] = msg->sg.data[i];
+       } while (i != msg->sg.end);
+
+       sk_msg_iter_prev(msg, end);
+}
+
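+/* Open a hole at index i: copy element i and everything after it one
+ * slot to the right and advance sg.end; the caller overwrites slot i.
+ */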
+static void sk_msg_shift_right(struct sk_msg *msg, int i)
+{
+       struct scatterlist tmp, sge;
+
+       sk_msg_iter_next(msg, end);
+       sge = sk_msg_elem_cpy(msg, i);
+       sk_msg_iter_var_next(i);
+       tmp = sk_msg_elem_cpy(msg, i);
+
+       while (i != msg->sg.end) {
+               msg->sg.data[i] = sge;
+               sk_msg_iter_var_next(i);
+               sge = tmp;
+               tmp = sk_msg_elem_cpy(msg, i);
+       }
+}
+
+BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+          u32, len, u64, flags)
+{
+       u32 i = 0, l, space, offset = 0;
+       u64 last = start + len;
+       int pop;
+
+       if (unlikely(flags))
+               return -EINVAL;
+
+       /* First find the starting scatterlist element */
+       i = msg->sg.start;
+       do {
+               l = sk_msg_elem(msg, i)->length;
+
+               if (start < offset + l)
+                       break;
+               offset += l;
+               sk_msg_iter_var_next(i);
+       } while (i != msg->sg.end);
+
+       /* Bounds checks: start and pop must be inside message */
+       if (start >= offset + l || last >= msg->sg.size)
+               return -EINVAL;
+
+       space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
+
+       pop = len;
+       /* --------------| offset
+        * -| start      |-------- len -------|
+        *
+        *  |----- a ----|-------- pop -------|----- b ----|
+        *  |______________________________________________| length
+        *
+        *
+        * a:   region at front of scatter element to save
+        * b:   region at back of scatter element to save when length > a + pop
+        * pop: region to pop from element; same as the input 'pop', it is
+        *      decremented below per iteration.
+        *
+        * Two top-level cases to handle when start != offset: first, b is
+        * non-zero, and second, b is zero, corresponding to a pop that spans
+        * more than one element.
+        *
+        * Then, if b is non-zero AND there is no space, allocate space and
+        * compact the a and b regions into a page. If there is space, shift
+        * the ring to the right, freeing the next element in the ring to
+        * place b, leaving a untouched except to reduce its length.
+        */
+       if (start != offset) {
+               struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
+               int a = start;
+               int b = sge->length - pop - a;
+
+               sk_msg_iter_var_next(i);
+
+               if (pop < sge->length - a) {
+                       if (space) {
+                               sge->length = a;
+                               sk_msg_shift_right(msg, i);
+                               nsge = sk_msg_elem(msg, i);
+                               get_page(sg_page(sge));
+                               sg_set_page(nsge,
+                                           sg_page(sge),
+                                           b, sge->offset + pop + a);
+                       } else {
+                               struct page *page, *orig;
+                               u8 *to, *from;
+
+                               page = alloc_pages(__GFP_NOWARN |
+                                                  __GFP_COMP   | GFP_ATOMIC,
+                                                  get_order(a + b));
+                               if (unlikely(!page))
+                                       return -ENOMEM;
+
+                               sge->length = a;
+                               orig = sg_page(sge);
+                               from = sg_virt(sge);
+                               to = page_address(page);
+                               memcpy(to, from, a);
+                               memcpy(to + a, from + a + pop, b);
+                               sg_set_page(sge, page, a + b, 0);
+                               put_page(orig);
+                       }
+                       pop = 0;
+               } else if (pop >= sge->length - a) {
+                       pop -= (sge->length - a);
+                       sge->length = a;
+               }
+       }
+
+       /* From above the current layout _must_ be as follows,
+        *
+        * -| offset
+        * -| start
+        *
+        *  |---- pop ---|---------------- b ------------|
+        *  |____________________________________________| length
+        *
+        * Offset and start of the current msg elem are equal because in the
+        * previous case we handled offset != start and either consumed the
+        * entire element and advanced to the next element OR pop == 0.
+        *
+        * Two cases to handle here: first, pop is less than the element
+        * length, leaving some remainder b above; simply adjust the
+        * element's layout in this case. Or pop >= the element length, so
+        * that b = 0; in this case advance to the next element while
+        * decrementing pop.
+        */
+       while (pop) {
+               struct scatterlist *sge = sk_msg_elem(msg, i);
+
+               if (pop < sge->length) {
+                       sge->length -= pop;
+                       sge->offset += pop;
+                       pop = 0;
+               } else {
+                       pop -= sge->length;
+                       sk_msg_shift_left(msg, i);
+               }
+               sk_msg_iter_var_next(i);
+       }
+
+       sk_mem_uncharge(msg->sk, len - pop);
+       msg->sg.size -= (len - pop);
+       sk_msg_compute_data_pointers(msg);
+       return 0;
+}
+
+static const struct bpf_func_proto bpf_msg_pop_data_proto = {
+       .func           = bpf_msg_pop_data,
+       .gpl_only       = false,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_ANYTHING,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_ANYTHING,
+};
+
 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
 {
        return task_get_classid(skb);
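For context, a minimal SK_MSG program exercising the new helper could look like the sketch below (assuming libbpf-style helper definitions; the 4-byte application header is hypothetical):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int strip_hdr(struct sk_msg_md *msg)
{
        /* Pop a hypothetical 4-byte header from the front of the msg.
         * flags must be 0 and the popped range must stay inside the
         * message, otherwise the helper returns -EINVAL.
         */
        if (bpf_msg_pop_data(msg, 0, 4, 0))
                return SK_DROP;
        return SK_PASS;
}

char _license[] SEC("license") = "GPL";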
@@ -3908,6 +4073,26 @@ static const struct bpf_func_proto bpf_get_socket_uid_proto = {
        .arg1_type      = ARG_PTR_TO_CTX,
 };
 
+BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
+          struct bpf_map *, map, u64, flags, void *, data, u64, size)
+{
+       if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
+               return -EINVAL;
+
+       return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
+}
+
+static const struct bpf_func_proto bpf_sockopt_event_output_proto =  {
+       .func           = bpf_sockopt_event_output,
+       .gpl_only       = true,
+       .ret_type       = RET_INTEGER,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_CONST_MAP_PTR,
+       .arg3_type      = ARG_ANYTHING,
+       .arg4_type      = ARG_PTR_TO_MEM,
+       .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
+};
+
 BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
           int, level, int, optname, char *, optval, int, optlen)
 {
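With the helper wired into sock_ops_func_proto() further down, a sock_ops program can emit perf events. A sketch, assuming libbpf-style map definitions (note .gpl_only above: the program needs a GPL-compatible license):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
        __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
        __uint(key_size, sizeof(int));
        __uint(value_size, sizeof(__u32));
} events SEC(".maps");

SEC("sockops")
int notify(struct bpf_sock_ops *ops)
{
        __u32 op = ops->op;

        /* flags may only carry BPF_F_INDEX_MASK bits, per the check
         * above; BPF_F_CURRENT_CPU selects this CPU's ring buffer.
         */
        bpf_perf_event_output(ops, &events, BPF_F_CURRENT_CPU,
                              &op, sizeof(op));
        return 1;
}

char _license[] SEC("license") = "GPL";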
@@ -4825,37 +5010,31 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
 
 #ifdef CONFIG_INET
 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
-                             struct sk_buff *skb, u8 family, u8 proto)
+                             int dif, int sdif, u8 family, u8 proto)
 {
        bool refcounted = false;
        struct sock *sk = NULL;
-       int dif = 0;
-
-       if (skb->dev)
-               dif = skb->dev->ifindex;
 
        if (family == AF_INET) {
                __be32 src4 = tuple->ipv4.saddr;
                __be32 dst4 = tuple->ipv4.daddr;
-               int sdif = inet_sdif(skb);
 
                if (proto == IPPROTO_TCP)
-                       sk = __inet_lookup(net, &tcp_hashinfo, skb, 0,
+                       sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
                                           src4, tuple->ipv4.sport,
                                           dst4, tuple->ipv4.dport,
                                           dif, sdif, &refcounted);
                else
                        sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
                                               dst4, tuple->ipv4.dport,
-                                              dif, sdif, &udp_table, skb);
+                                              dif, sdif, &udp_table, NULL);
 #if IS_ENABLED(CONFIG_IPV6)
        } else {
                struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
                struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
-               int sdif = inet6_sdif(skb);
 
                if (proto == IPPROTO_TCP)
-                       sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0,
+                       sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
                                            src6, tuple->ipv6.sport,
                                            dst6, ntohs(tuple->ipv6.dport),
                                            dif, sdif, &refcounted);
@@ -4864,7 +5043,7 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
                                                            src6, tuple->ipv6.sport,
                                                            dst6, tuple->ipv6.dport,
                                                            dif, sdif,
-                                                           &udp_table, skb);
+                                                           &udp_table, NULL);
 #endif
        }
 
@@ -4881,31 +5060,34 @@ static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
  * callers to satisfy BPF_CALL declarations.
  */
 static unsigned long
-bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
-             u8 proto, u64 netns_id, u64 flags)
+__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+               struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
+               u64 flags)
 {
-       struct net *caller_net;
        struct sock *sk = NULL;
        u8 family = AF_UNSPEC;
        struct net *net;
+       int sdif;
 
        family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
-       if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
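+       /* netns_id convention: a value that is negative when truncated
+        * to s32 (e.g. BPF_F_CURRENT_NETNS) means "use the caller's own
+        * netns"; 0..S32_MAX names a netns id; anything else is invalid.
+        */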
+       if (unlikely(family == AF_UNSPEC || flags ||
+                    !((s32)netns_id < 0 || netns_id <= S32_MAX)))
                goto out;
 
-       if (skb->dev)
-               caller_net = dev_net(skb->dev);
+       if (family == AF_INET)
+               sdif = inet_sdif(skb);
        else
-               caller_net = sock_net(skb->sk);
-       if (netns_id) {
+               sdif = inet6_sdif(skb);
+
+       if ((s32)netns_id < 0) {
+               net = caller_net;
+               sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
+       } else {
                net = get_net_ns_by_id(caller_net, netns_id);
                if (unlikely(!net))
                        goto out;
-               sk = sk_lookup(net, tuple, skb, family, proto);
+               sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
                put_net(net);
-       } else {
-               net = caller_net;
-               sk = sk_lookup(net, tuple, skb, family, proto);
        }
 
        if (sk)
@@ -4914,6 +5096,25 @@ out:
        return (unsigned long) sk;
 }
 
+static unsigned long
+bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
+             u8 proto, u64 netns_id, u64 flags)
+{
+       struct net *caller_net;
+       int ifindex;
+
+       if (skb->dev) {
+               caller_net = dev_net(skb->dev);
+               ifindex = skb->dev->ifindex;
+       } else {
+               caller_net = sock_net(skb->sk);
+               ifindex = 0;
+       }
+
+       return __bpf_sk_lookup(skb, tuple, len, caller_net, ifindex,
+                             proto, netns_id, flags);
+}
+
 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
           struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
 {
@@ -4963,6 +5164,87 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_SOCKET,
 };
+
+BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
+          struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+       struct net *caller_net = dev_net(ctx->rxq->dev);
+       int ifindex = ctx->rxq->dev->ifindex;
+
+       return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+                             IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
+       .func           = bpf_xdp_sk_lookup_udp,
+       .gpl_only       = false,
+       .pkt_access     = true,
+       .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
+          struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
+{
+       struct net *caller_net = dev_net(ctx->rxq->dev);
+       int ifindex = ctx->rxq->dev->ifindex;
+
+       return __bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex,
+                             IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
+       .func           = bpf_xdp_sk_lookup_tcp,
+       .gpl_only       = false,
+       .pkt_access     = true,
+       .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
+          struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+       return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
+                              IPPROTO_TCP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
+       .func           = bpf_sock_addr_sk_lookup_tcp,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
+};
+
+BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
+          struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
+{
+       return __bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0,
+                              IPPROTO_UDP, netns_id, flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
+       .func           = bpf_sock_addr_sk_lookup_udp,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+       .arg1_type      = ARG_PTR_TO_CTX,
+       .arg2_type      = ARG_PTR_TO_MEM,
+       .arg3_type      = ARG_CONST_SIZE,
+       .arg4_type      = ARG_ANYTHING,
+       .arg5_type      = ARG_ANYTHING,
+};
+
 #endif /* CONFIG_INET */
 
 bool bpf_helper_changes_pkt_data(void *func)
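Together with the xdp_func_proto() hooks added below, XDP programs can now look up sockets. A sketch (header parsing elided; BPF_F_CURRENT_NETNS is the uapi spelling of the negative netns_id convention handled in __bpf_sk_lookup() above, assuming it is available in this tree's headers):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_lookup(struct xdp_md *ctx)
{
        struct bpf_sock_tuple tuple = {};
        struct bpf_sock *sk;

        /* tuple.ipv4 would be filled from the parsed packet headers */
        sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (sk)
                bpf_sk_release(sk);     /* acquired refs must be released */
        return XDP_PASS;
}

char _license[] SEC("license") = "GPL";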
@@ -4985,6 +5267,7 @@ bool bpf_helper_changes_pkt_data(void *func)
            func == bpf_xdp_adjust_meta ||
            func == bpf_msg_pull_data ||
            func == bpf_msg_push_data ||
+           func == bpf_msg_pop_data ||
            func == bpf_xdp_adjust_tail ||
 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
            func == bpf_lwt_seg6_store_bytes ||
@@ -5069,6 +5352,14 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_socket_cookie_sock_addr_proto;
        case BPF_FUNC_get_local_storage:
                return &bpf_get_local_storage_proto;
+#ifdef CONFIG_INET
+       case BPF_FUNC_sk_lookup_tcp:
+               return &bpf_sock_addr_sk_lookup_tcp_proto;
+       case BPF_FUNC_sk_lookup_udp:
+               return &bpf_sock_addr_sk_lookup_udp_proto;
+       case BPF_FUNC_sk_release:
+               return &bpf_sk_release_proto;
+#endif /* CONFIG_INET */
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -5213,6 +5504,14 @@ xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_xdp_adjust_tail_proto;
        case BPF_FUNC_fib_lookup:
                return &bpf_xdp_fib_lookup_proto;
+#ifdef CONFIG_INET
+       case BPF_FUNC_sk_lookup_udp:
+               return &bpf_xdp_sk_lookup_udp_proto;
+       case BPF_FUNC_sk_lookup_tcp:
+               return &bpf_xdp_sk_lookup_tcp_proto;
+       case BPF_FUNC_sk_release:
+               return &bpf_sk_release_proto;
+#endif
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -5239,6 +5538,8 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_get_socket_cookie_sock_ops_proto;
        case BPF_FUNC_get_local_storage:
                return &bpf_get_local_storage_proto;
+       case BPF_FUNC_perf_event_output:
+               return &bpf_sockopt_event_output_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -5263,6 +5564,8 @@ sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_msg_pull_data_proto;
        case BPF_FUNC_msg_push_data:
                return &bpf_msg_push_data_proto;
+       case BPF_FUNC_msg_pop_data:
+               return &bpf_msg_pop_data_proto;
        default:
                return bpf_base_func_proto(func_id);
        }
@@ -5435,8 +5738,12 @@ static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type
                if (size != size_default)
                        return false;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
-               if (size != sizeof(struct bpf_flow_keys *))
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+               if (size != sizeof(__u64))
+                       return false;
+               break;
+       case bpf_ctx_range(struct __sk_buff, tstamp):
+               if (size != sizeof(__u64))
                        return false;
                break;
        default:
@@ -5464,8 +5771,10 @@ static bool sk_filter_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_meta):
        case bpf_ctx_range(struct __sk_buff, data_end):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
+       case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
 
@@ -5489,7 +5798,8 @@ static bool cg_skb_is_valid_access(int off, int size,
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_end):
@@ -5504,6 +5814,10 @@ static bool cg_skb_is_valid_access(int off, int size,
                case bpf_ctx_range(struct __sk_buff, priority):
                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
                        break;
+               case bpf_ctx_range(struct __sk_buff, tstamp):
+                       if (!capable(CAP_SYS_ADMIN))
+                               return false;
+                       break;
                default:
                        return false;
                }
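A sketch of what the CAP_SYS_ADMIN-gated write enables from a cgroup skb program (EDT-style pacing; only effective with a qdisc that honors skb->tstamp, e.g. fq):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int pace(struct __sk_buff *skb)
{
        /* delay transmit by 1ms; loading this program requires
         * CAP_SYS_ADMIN because of the tstamp write check above
         */
        skb->tstamp = bpf_ktime_get_ns() + 1000000ULL;
        return 1;
}

char _license[] SEC("license") = "GPL";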
@@ -5530,7 +5844,9 @@ static bool lwt_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+       case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
 
@@ -5740,6 +6056,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
                case bpf_ctx_range(struct __sk_buff, priority):
                case bpf_ctx_range(struct __sk_buff, tc_classid):
                case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
+               case bpf_ctx_range(struct __sk_buff, tstamp):
                        break;
                default:
                        return false;
@@ -5756,7 +6073,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
                return false;
        }
@@ -5958,7 +6275,9 @@ static bool sk_skb_is_valid_access(int off, int size,
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+       case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
 
@@ -5994,6 +6313,9 @@ static bool sk_msg_is_valid_access(int off, int size,
        if (type == BPF_WRITE)
                return false;
 
+       if (off % size != 0)
+               return false;
+
        switch (off) {
        case offsetof(struct sk_msg_md, data):
                info->reg_type = PTR_TO_PACKET;
@@ -6005,16 +6327,20 @@ static bool sk_msg_is_valid_access(int off, int size,
                if (size != sizeof(__u64))
                        return false;
                break;
-       default:
+       case bpf_ctx_range(struct sk_msg_md, family):
+       case bpf_ctx_range(struct sk_msg_md, remote_ip4):
+       case bpf_ctx_range(struct sk_msg_md, local_ip4):
+       case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
+       case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
+       case bpf_ctx_range(struct sk_msg_md, remote_port):
+       case bpf_ctx_range(struct sk_msg_md, local_port):
+       case bpf_ctx_range(struct sk_msg_md, size):
                if (size != sizeof(__u32))
                        return false;
-       }
-
-       if (off < 0 || off >= sizeof(struct sk_msg_md))
-               return false;
-       if (off % size != 0)
+               break;
+       default:
                return false;
-
+       }
        return true;
 }
 
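This is the check named in the subject line: sk_msg_md accesses must now be naturally aligned (off % size == 0) up front, and only the enumerated field ranges verify, so loads from padding holes no longer hit a permissive default. Illustration:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int md_access(struct sk_msg_md *msg)
{
        __u32 port = msg->local_port;   /* enumerated field: accepted */

        /* An unaligned load fails the off % size test, and a load
         * from an offset outside the listed ranges is now rejected.
         */
        if (!port)
                return SK_DROP;
        return SK_PASS;
}

char _license[] SEC("license") = "GPL";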
@@ -6039,12 +6365,14 @@ static bool flow_dissector_is_valid_access(int off, int size,
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
-       case bpf_ctx_range(struct __sk_buff, flow_keys):
+       case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
+       case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
 
@@ -6139,19 +6467,19 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                break;
 
        case offsetof(struct __sk_buff, vlan_present):
-       case offsetof(struct __sk_buff, vlan_tci):
-               BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+               *target_size = 1;
+               *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
+                                     PKT_VLAN_PRESENT_OFFSET());
+               if (PKT_VLAN_PRESENT_BIT)
+                       *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
+               if (PKT_VLAN_PRESENT_BIT < 7)
+                       *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+               break;
 
+       case offsetof(struct __sk_buff, vlan_tci):
                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
                                      bpf_target_off(struct sk_buff, vlan_tci, 2,
                                                     target_size));
-               if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
-                       *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
-                                               ~VLAN_TAG_PRESENT);
-               } else {
-                       *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
-                       *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
-               }
                break;
 
        case offsetof(struct __sk_buff, cb[0]) ...
@@ -6354,6 +6682,33 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
                *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
                                      si->src_reg, off);
                break;
+
+       case offsetof(struct __sk_buff, tstamp):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
+
+               if (type == BPF_WRITE)
+                       *insn++ = BPF_STX_MEM(BPF_DW,
+                                             si->dst_reg, si->src_reg,
+                                             bpf_target_off(struct sk_buff,
+                                                            tstamp, 8,
+                                                            target_size));
+               else
+                       *insn++ = BPF_LDX_MEM(BPF_DW,
+                                             si->dst_reg, si->src_reg,
+                                             bpf_target_off(struct sk_buff,
+                                                            tstamp, 8,
+                                                            target_size));
+               break;
+
+       case offsetof(struct __sk_buff, wire_len):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
+
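+               /* wire_len reads qdisc_skb_cb.pkt_len, which lives in
+                * skb->cb; rewrite the ctx offset to point at it.
+                */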
+               off = si->off;
+               off -= offsetof(struct __sk_buff, wire_len);
+               off += offsetof(struct sk_buff, cb);
+               off += offsetof(struct qdisc_skb_cb, pkt_len);
+               *target_size = 4;
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
        }
 
        return insn - insn_buf;
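Both rewrites above surface new __sk_buff fields (tstamp also writable for tc). A tc-side sketch of reading them (bpf_printk is a libbpf convenience macro):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int observe(struct __sk_buff *skb)
{
        __u32 wire = skb->wire_len;     /* qdisc_skb_cb.pkt_len */
        __u64 ts = skb->tstamp;         /* skb->tstamp, 8 bytes */

        bpf_printk("wire_len=%u tstamp=%llu\n", wire, ts);
        return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";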
@@ -7182,6 +7537,12 @@ static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
                *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
                                      offsetof(struct sock_common, skc_num));
                break;
+
+       case offsetof(struct sk_msg_md, size):
+               *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
+                                     si->dst_reg, si->src_reg,
+                                     offsetof(struct sk_msg_sg, size));
+               break;
        }
 
        return insn - insn_buf;
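And the size rewrite above lets SK_MSG programs see the total message size; a sketch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int min_size(struct sk_msg_md *msg)
{
        /* msg->size is rewritten into a load of sk_msg.sg.size above */
        if (msg->size < 8)      /* hypothetical 8-byte minimum */
                return SK_DROP;
        return SK_PASS;
}

char _license[] SEC("license") = "GPL";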