Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
author     David S. Miller <davem@davemloft.net>
           Tue, 11 Dec 2018 02:00:43 +0000 (18:00 -0800)
committer  David S. Miller <davem@davemloft.net>
           Tue, 11 Dec 2018 02:00:43 +0000 (18:00 -0800)
Daniel Borkmann says:

====================
pull-request: bpf-next 2018-12-11

The following pull-request contains BPF updates for your *net-next* tree.

It has three minor merge conflicts; resolutions as follows:

1) tools/testing/selftests/bpf/test_verifier.c

 Take the first chunk, the one with alignment_prevented_execution.

2) net/core/filter.c

  [...]
  case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
  case bpf_ctx_range(struct __sk_buff, wire_len):
        return false;
  [...]

3) include/uapi/linux/bpf.h

  Take the second chunk in each of the two conflicting hunks.

The main changes are:

1) Add support for BPF line info via BTF and extend libbpf as well
   as bpftool's program dump to annotate output with BPF C code to
   facilitate debugging and introspection, from Martin.

2) Add support for BPF_ALU | BPF_ARSH | BPF_{K,X} in the interpreter
   and all JIT backends, from Jiong (a short C model of the semantics
   follows this list).

3) Improve BPF test coverage on archs with no efficient unaligned
   access by adding an "any alignment" flag to the BPF program load
   to forcefully disable verifier alignment checks, from David.

4) Add a new bpf_prog_test_run_xattr() API to libbpf which allows for
   proper use of BPF_PROG_TEST_RUN with data_out, from Lorenz.

5) Extend tc BPF programs to use a new __sk_buff field called wire_len
   for more accurate accounting of packets going to the wire, from Petar.

6) Extend bpftool to allow dumping the trace pipe, and add several
   improvements in bash completion and map/prog dump, from Quentin.

7) Optimize arm64 BPF JIT to always emit movn/movk/movk sequence for
   kernel addresses and add a dedicated BPF JIT backend allocator,
   from Ard.

8) Add a BPF helper function for IR remotes to report mouse movements,
   from Sean.

9) Various cleanups in BPF prog dump e.g. to make UAPI bpf_prog_info
   member naming consistent with existing conventions, from Yonghong
   and Song.

10) Misc cleanups and improvements, e.g. allowing the interface name
    to be passed via cmdline for the xdp1 BPF example, from Matteo.

11) Fix a potential segfault in BPF sample loader's kprobes handling,
    from Daniel T.

12) Fix SPDX license in libbpf's README.rst, from Andrey.
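
As a brief illustration of 2) above, here is a minimal C model of the
new 32-bit ARSH semantics (a sketch only; right-shifting a negative
signed value is implementation-defined in ISO C, but arithmetic on the
compilers the kernel supports):

    #include <stdint.h>

    /* BPF_ALU | BPF_ARSH | BPF_K: arithmetic right shift of the low
     * 32 bits of dst, result zero-extended into the 64-bit register,
     * matching the PPC_SRAW + bpf_alu32_trunc sequence in the ppc64
     * JIT hunk below. The verifier rejects imm >= 32; the mask is
     * belt and braces.
     */
    static uint64_t bpf_alu32_arsh_k(uint64_t dst, uint32_t imm)
    {
            return (uint32_t)((int32_t)dst >> (imm & 31));
    }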
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
12 files changed:
arch/powerpc/net/bpf_jit_comp64.c
include/linux/filter.h
include/uapi/linux/bpf.h
kernel/bpf/btf.c
kernel/bpf/verifier.c
net/bpf/test_run.c
net/core/filter.c
tools/bpf/bpftool/btf_dumper.c
tools/include/uapi/linux/bpf.h
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_verifier.c

index 9393e231cbc2813e1f24c4484c62bb87fbf24a53,7dc81877057d8e4947ee3a06d303c56b9a4394ff..7ce57657d3b8f07696185120dc56516aacbbb625
@@@ -529,9 -529,15 +529,15 @@@ static int bpf_jit_build_body(struct bp
                        if (imm != 0)
                                PPC_SRDI(dst_reg, dst_reg, imm);
                        break;
+               case BPF_ALU | BPF_ARSH | BPF_X: /* (s32) dst >>= src */
+                       PPC_SRAW(dst_reg, dst_reg, src_reg);
+                       goto bpf_alu32_trunc;
                case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
                        PPC_SRAD(dst_reg, dst_reg, src_reg);
                        break;
+               case BPF_ALU | BPF_ARSH | BPF_K: /* (s32) dst >>= imm */
+                       PPC_SRAWI(dst_reg, dst_reg, imm);
+                       goto bpf_alu32_trunc;
                case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
                        if (imm != 0)
                                PPC_SRADI(dst_reg, dst_reg, imm);
@@@ -891,55 -897,6 +897,55 @@@ cond_branch
        return 0;
  }
  
 +/* Fix the branch target addresses for subprog calls */
 +static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
 +                                     struct codegen_context *ctx, u32 *addrs)
 +{
 +      const struct bpf_insn *insn = fp->insnsi;
 +      bool func_addr_fixed;
 +      u64 func_addr;
 +      u32 tmp_idx;
 +      int i, ret;
 +
 +      for (i = 0; i < fp->len; i++) {
 +              /*
 +               * During the extra pass, only the branch target addresses for
 +               * the subprog calls need to be fixed. All other instructions
 +               * can be left untouched.
 +               *
 +               * The JITed image length does not change because we already
 +               * ensure that the JITed instruction sequences for these calls
 +               * are of fixed length by padding them with NOPs.
 +               */
 +              if (insn[i].code == (BPF_JMP | BPF_CALL) &&
 +                  insn[i].src_reg == BPF_PSEUDO_CALL) {
 +                      ret = bpf_jit_get_func_addr(fp, &insn[i], true,
 +                                                  &func_addr,
 +                                                  &func_addr_fixed);
 +                      if (ret < 0)
 +                              return ret;
 +
 +                      /*
 +                       * Save ctx->idx as this would currently point to the
 +                       * end of the JITed image and set it to the offset of
 +                       * the instruction sequence corresponding to the
 +                       * subprog call temporarily.
 +                       */
 +                      tmp_idx = ctx->idx;
 +                      ctx->idx = addrs[i] / 4;
 +                      bpf_jit_emit_func_call_rel(image, ctx, func_addr);
 +
 +                      /*
 +                       * Restore ctx->idx here. This is safe as the length
 +                       * of the JITed sequence remains unchanged.
 +                       */
 +                      ctx->idx = tmp_idx;
 +              }
 +      }
 +
 +      return 0;
 +}
 +
  struct powerpc64_jit_data {
        struct bpf_binary_header *header;
        u32 *addrs;
@@@ -1038,22 -995,6 +1044,22 @@@ struct bpf_prog *bpf_int_jit_compile(st
  skip_init_ctx:
        code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
  
 +      if (extra_pass) {
 +              /*
 +               * Do not touch the prologue and epilogue as they will remain
 +               * unchanged. Only fix the branch target address for subprog
 +               * calls in the body.
 +               *
 +               * This does not change the offsets and lengths of the subprog
 +               * call instruction sequences and hence, the size of the JITed
 +               * image as well.
 +               */
 +              bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
 +
 +              /* There is no need to perform the usual passes. */
 +              goto skip_codegen_passes;
 +      }
 +
        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }
  
 +skip_codegen_passes:
        if (bpf_jit_enable > 1)
                /*
                 * Note that we output the base address of the code_base
diff --combined include/linux/filter.h
index 5a26a7caa98f07a22af5b3a1c8437b8daabd701e,29f21f9d7f68f6c9ef338a69e3072a9d99bc98a1..537e9e7c6e6facddc2b5d8da1ada09aeccb560c4
@@@ -449,13 -449,6 +449,13 @@@ struct sock_reuseport
        offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
  #define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)                            \
        offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
 +#if BITS_PER_LONG == 64
 +# define bpf_ctx_range_ptr(TYPE, MEMBER)                                      \
 +      offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
 +#else
 +# define bpf_ctx_range_ptr(TYPE, MEMBER)                                      \
 +      offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
 +#endif /* BITS_PER_LONG == 64 */
  
  #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)                          \
        ({                                                                      \
@@@ -725,6 -718,13 +725,13 @@@ void bpf_prog_free(struct bpf_prog *fp)
  
  bool bpf_opcode_in_insntable(u8 code);
  
+ void bpf_prog_free_linfo(struct bpf_prog *prog);
+ void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
+                              const u32 *insn_to_jit_off);
+ int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
+ void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
+ void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);
  struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
  struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
                                  gfp_t gfp_extra_flags);
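
As an aside, a small sketch of how the new bpf_ctx_range_ptr() macro
above gets consumed (mirroring the is_valid_access() switches in the
net/core/filter.c diff further down; the wrapper function here is made
up for illustration and assumes the updated uapi header):

    #include <stdbool.h>
    #include <stddef.h>
    #include <linux/bpf.h>  /* struct __sk_buff */

    /* Same expansion as the 32-bit branch above: a pointer member
     * occupies a full 8-byte __bpf_md_ptr slot, so the GCC case
     * range spans 8 bytes regardless of sizeof(void *).
     */
    #define bpf_ctx_range_ptr(TYPE, MEMBER) \
            offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1

    static bool off_is_flow_keys(int off)
    {
            switch (off) {
            case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                    return true;
            default:
                    return false;
            }
    }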
diff --combined include/uapi/linux/bpf.h
index ec8b40ff386e3ac8cecbaf9ce6ef611228485b13,f943ed803309922f3645444963c2eed78dd65284..92e962ba0c4724794000708e8daf4099c8a5bd08
@@@ -232,6 -232,20 +232,20 @@@ enum bpf_attach_type 
   */
  #define BPF_F_STRICT_ALIGNMENT        (1U << 0)
  
+ /* If BPF_F_ANY_ALIGNMENT is used in the BPF_PROG_LOAD command, the
+  * verifier will allow any alignment whatsoever.  On platforms
+  * with strict alignment requirements for loads and stores (such
+  * as sparc and mips) the verifier validates that all loads and
+  * stores provably follow this requirement.  This flag turns that
+  * checking and enforcement off.
+  *
+  * It is mostly used for testing when we want to validate the
+  * context and memory access aspects of the verifier, but because
+  * of an unaligned access the alignment check would trigger before
+  * the one we are interested in.
+  */
+ #define BPF_F_ANY_ALIGNMENT   (1U << 1)
  /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
  #define BPF_PSEUDO_MAP_FD     1
  
@@@ -342,6 -356,9 +356,9 @@@ union bpf_attr 
                __u32           func_info_rec_size;     /* userspace bpf_func_info size */
                __aligned_u64   func_info;      /* func info */
                __u32           func_info_cnt;  /* number of bpf_func_info records */
+               __u32           line_info_rec_size;     /* userspace bpf_line_info size */
+               __aligned_u64   line_info;      /* line info */
+               __u32           line_info_cnt;  /* number of bpf_line_info records */
        };
  
        struct { /* anonymous struct used by BPF_OBJ_* commands */
        struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
                __u32           prog_fd;
                __u32           retval;
-               __u32           data_size_in;
-               __u32           data_size_out;
+               __u32           data_size_in;   /* input: len of data_in */
+               __u32           data_size_out;  /* input/output: len of data_out
+                                                *   returns ENOSPC if data_out
+                                                *   is too small.
+                                                */
                __aligned_u64   data_in;
                __aligned_u64   data_out;
                __u32           repeat;
   *    Return
   *            0 on success, or a negative error in case of failure.
   *
-  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
-  *    Description
-  *            Pop an element from *map*.
-  * Return
-  *            0 on success, or a negative error in case of failure.
-  *
-  * int bpf_map_peek_elem(struct bpf_map *map, void *value)
-  *    Description
-  *            Get an element from *map* without removing it.
-  * Return
-  *            0 on success, or a negative error in case of failure.
-  *
   * int bpf_probe_read(void *dst, u32 size, const void *src)
   *    Description
   *            For tracing programs, safely attempt to read *size* bytes from
   *            is set to metric from route (IPv4/IPv6 only), and ifindex
   *            is set to the device index of the nexthop from the FIB lookup.
   *
-  *             *plen* argument is the size of the passed in struct.
-  *             *flags* argument can be a combination of one or more of the
-  *             following values:
+  *            *plen* argument is the size of the passed in struct.
+  *            *flags* argument can be a combination of one or more of the
+  *            following values:
   *
   *            **BPF_FIB_LOOKUP_DIRECT**
   *                    Do a direct table lookup vs full lookup using FIB
   *                    Perform lookup from an egress perspective (default is
   *                    ingress).
   *
-  *             *ctx* is either **struct xdp_md** for XDP programs or
-  *             **struct sk_buff** tc cls_act programs.
-  *     Return
+  *            *ctx* is either **struct xdp_md** for XDP programs or
+  *            **struct sk_buff** tc cls_act programs.
+  *    Return
   *            * < 0 if any input argument is invalid
   *            *   0 on success (packet is forwarded, nexthop neighbor exists)
   *            * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
   *            translated to a keycode using the rc keymap, and reported as
   *            an input key down event. After a period a key up event is
   *            generated. This period can be extended by calling either
-  *            **bpf_rc_keydown** () again with the same values, or calling
-  *            **bpf_rc_repeat** ().
+  *            **bpf_rc_keydown**\ () again with the same values, or calling
+  *            **bpf_rc_repeat**\ ().
   *
   *            Some protocols include a toggle bit, in case the button was
   *            released and pressed again between consecutive scancodes.
   *            The *flags* meaning is specific for each map type,
   *            and has to be 0 for cgroup local storage.
   *
-  *            Depending on the bpf program type, a local storage area
-  *            can be shared between multiple instances of the bpf program,
+  *            Depending on the BPF program type, a local storage area
+  *            can be shared between multiple instances of the BPF program,
   *            running simultaneously.
   *
 *            Users should take care of the synchronization themselves.
-  *            For example, by using the BPF_STX_XADD instruction to alter
+  *            For example, by using the **BPF_STX_XADD** instruction to alter
   *            the shared data.
   *    Return
-  *            Pointer to the local storage area.
+  *            A pointer to the local storage area.
   *
   * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
   *    Description
-  *            Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map
-  *            It checks the selected sk is matching the incoming
-  *            request in the skb.
+  *            Select a **SO_REUSEPORT** socket from a
+  *            **BPF_MAP_TYPE_REUSEPORT_ARRAY** *map*.
+  *            It checks that the selected socket matches the incoming
+  *            request in the socket buffer.
   *    Return
   *            0 on success, or a negative error in case of failure.
   *
 - * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
 + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
   *    Description
   *            Look for TCP socket matching *tuple*, optionally in a child
   *            network namespace *netns*. The return value must be checked,
-  *            and if non-NULL, released via **bpf_sk_release**\ ().
+  *            and if non-**NULL**, released via **bpf_sk_release**\ ().
   *
   *            The *ctx* should point to the context of the program, such as
   *            the skb or socket (depending on the hook in use). This is used
   *            **sizeof**\ (*tuple*\ **->ipv6**)
   *                    Look for an IPv6 socket.
   *
 - *            If the *netns* is zero, then the socket lookup table in the
 - *            netns associated with the *ctx* will be used. For the TC hooks,
 - *            this in the netns of the device in the skb. For socket hooks,
 - *            this in the netns of the socket. If *netns* is non-zero, then
 - *            it specifies the ID of the netns relative to the netns
 - *            associated with the *ctx*.
 + *            If the *netns* is a negative signed 32-bit integer, then the
 + *            socket lookup table in the netns associated with the *ctx*
 + *            will be used. For the TC hooks, this is the netns of the device
 + *            in the skb. For socket hooks, this is the netns of the socket.
 + *            If *netns* is any other signed 32-bit value greater than or
 + *            equal to zero then it specifies the ID of the netns relative to
 + *            the netns associated with the *ctx*. *netns* values beyond the
 + *            range of 32-bit integers are reserved for future use.
   *
   *            All values for *flags* are reserved for future usage, and must
   *            be left at zero.
   *            This helper is available only if the kernel was compiled with
   *            **CONFIG_NET** configuration option.
   *    Return
 - *            A pointer to *struct bpf_sock*, or **NULL** in case of failure.
 - *            For sockets with reuseport option, **struct bpf_sock**
 - *            return is from **reuse->socks**\ [] using hash of the packet.
 + *            Pointer to *struct bpf_sock*, or NULL in case of failure.
 + *            For sockets with reuseport option, the *struct bpf_sock*
 + *            result is from reuse->socks[] using the hash of the tuple.
   *
 - * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
 + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
   *    Description
   *            Look for UDP socket matching *tuple*, optionally in a child
   *            network namespace *netns*. The return value must be checked,
-  *            and if non-NULL, released via **bpf_sk_release**\ ().
+  *            and if non-**NULL**, released via **bpf_sk_release**\ ().
   *
   *            The *ctx* should point to the context of the program, such as
   *            the skb or socket (depending on the hook in use). This is used
   *            **sizeof**\ (*tuple*\ **->ipv6**)
   *                    Look for an IPv6 socket.
   *
 - *            If the *netns* is zero, then the socket lookup table in the
 - *            netns associated with the *ctx* will be used. For the TC hooks,
 - *            this in the netns of the device in the skb. For socket hooks,
 - *            this in the netns of the socket. If *netns* is non-zero, then
 - *            it specifies the ID of the netns relative to the netns
 - *            associated with the *ctx*.
 + *            If the *netns* is a negative signed 32-bit integer, then the
 + *            socket lookup table in the netns associated with the *ctx*
 + *            will be used. For the TC hooks, this is the netns of the device
 + *            in the skb. For socket hooks, this is the netns of the socket.
 + *            If *netns* is any other signed 32-bit value greater than or
 + *            equal to zero then it specifies the ID of the netns relative to
 + *            the netns associated with the *ctx*. *netns* values beyond the
 + *            range of 32-bit integers are reserved for future use.
   *
   *            All values for *flags* are reserved for future usage, and must
   *            be left at zero.
   *            This helper is available only if the kernel was compiled with
   *            **CONFIG_NET** configuration option.
   *    Return
 - *            A pointer to **struct bpf_sock**, or **NULL** in case of
 - *            failure. For sockets with reuseport option, **struct bpf_sock**
 - *            return is from **reuse->socks**\ [] using hash of the packet.
 + *            Pointer to *struct bpf_sock*, or NULL in case of failure.
 + *            For sockets with reuseport option, the *struct bpf_sock*
 + *            result is from reuse->socks[] using the hash of the tuple.
   *
-  * int bpf_sk_release(struct bpf_sock *sk)
+  * int bpf_sk_release(struct bpf_sock *sock)
   *    Description
-  *            Release the reference held by *sock*. *sock* must be a non-NULL
-  *            pointer that was returned from bpf_sk_lookup_xxx\ ().
+  *            Release the reference held by *sock*. *sock* must be a
+  *            non-**NULL** pointer that was returned from
+  *            **bpf_sk_lookup_xxx**\ ().
   *    Return
   *            0 on success, or a negative error in case of failure.
   *
+  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
+  *    Description
+  *            Pop an element from *map*.
+  *    Return
+  *            0 on success, or a negative error in case of failure.
+  *
+  * int bpf_map_peek_elem(struct bpf_map *map, void *value)
+  *    Description
+  *            Get an element from *map* without removing it.
+  *    Return
+  *            0 on success, or a negative error in case of failure.
+  *
  * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
   *    Description
-  *            For socket policies, insert *len* bytes into msg at offset
+  *            For socket policies, insert *len* bytes into *msg* at offset
   *            *start*.
   *
   *            If a program of type **BPF_PROG_TYPE_SK_MSG** is run on a
-  *            *msg* it may want to insert metadata or options into the msg.
+  *            *msg* it may want to insert metadata or options into the *msg*.
   *            This can later be read and used by any of the lower layer BPF
   *            hooks.
   *
 *            This helper may fail under memory pressure (if an allocation
 *            fails); in these cases BPF programs will get an appropriate
 *            error and will need to handle it.
-  *
   *    Return
   *            0 on success, or a negative error in case of failure.
   *
   * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
-  *     Description
+  *    Description
   *            Will remove *pop* bytes from a *msg* starting at byte *start*.
   *            This may result in **ENOMEM** errors under certain situations if
   *            an allocation and copy are required due to a full ring buffer.
   *            However, the helper will try to avoid doing the allocation
   *            if possible. Other errors can occur if input parameters are
-  *            invalid either due to *start* byte not being valid part of msg
+  *            invalid either due to *start* byte not being valid part of *msg*
 *            payload and/or *pop* value being too large.
+  *    Return
+  *            0 on success, or a negative error in case of failure.
+  *
+  * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+  *    Description
+  *            This helper is used in programs implementing IR decoding, to
+  *            report a successfully decoded pointer movement.
   *
+  *            The *ctx* should point to the lirc sample as passed into
+  *            the program.
+  *
+  *            This helper is only available if the kernel was compiled with
+  *            the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+  *            "**y**".
   *    Return
-  *            0 on success, or a negative erro in case of failure.
+  *            0
   */
  #define __BPF_FUNC_MAPPER(FN)         \
        FN(unspec),                     \
        FN(map_pop_elem),               \
        FN(map_peek_elem),              \
        FN(msg_push_data),              \
-       FN(msg_pop_data),
+       FN(msg_pop_data),               \
+       FN(rc_pointer_rel),
  
  /* integer value in 'imm' field of BPF_CALL instruction selects which helper
   * function eBPF program intends to call
@@@ -2434,9 -2465,6 +2469,9 @@@ enum bpf_func_id 
  /* BPF_FUNC_perf_event_output for sk_buff input context. */
  #define BPF_F_CTXLEN_MASK             (0xfffffULL << 32)
  
 +/* Current network namespace */
 +#define BPF_F_CURRENT_NETNS           (-1L)
 +
  /* Mode for BPF_FUNC_skb_adjust_room helper. */
  enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@@ -2454,12 -2482,6 +2489,12 @@@ enum bpf_lwt_encap_mode 
        BPF_LWT_ENCAP_SEG6_INLINE
  };
  
 +#define __bpf_md_ptr(type, name)      \
 +union {                                       \
 +      type name;                      \
 +      __u64 :64;                      \
 +} __attribute__((aligned(8)))
 +
  /* user accessible mirror of in-kernel sk_buff.
   * new fields can only be added to the end of this structure
   */
@@@ -2494,8 -2516,9 +2529,9 @@@ struct __sk_buff 
        /* ... here. */
  
        __u32 data_meta;
 -      struct bpf_flow_keys *flow_keys;
 +      __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
        __u64 tstamp;
+       __u32 wire_len;
  };
  
  struct bpf_tunnel_key {
@@@ -2611,8 -2634,8 +2647,8 @@@ enum sk_action 
   * be added to the end of this structure
   */
  struct sk_msg_md {
 -      void *data;
 -      void *data_end;
 +      __bpf_md_ptr(void *, data);
 +      __bpf_md_ptr(void *, data_end);
  
        __u32 family;
        __u32 remote_ip4;       /* Stored in network byte order */
@@@ -2628,9 -2651,8 +2664,9 @@@ struct sk_reuseport_md 
         * Start of directly accessible data. It begins from
         * the tcp/udp header.
         */
 -      void *data;
 -      void *data_end;         /* End of directly accessible data */
 +      __bpf_md_ptr(void *, data);
 +      /* End of directly accessible data */
 +      __bpf_md_ptr(void *, data_end);
        /*
         * Total length of packet (starting from the tcp/udp header).
         * Note that the directly accessible bytes (data_end - data)
@@@ -2674,7 -2696,13 +2710,13 @@@ struct bpf_prog_info 
        __u32 btf_id;
        __u32 func_info_rec_size;
        __aligned_u64 func_info;
-       __u32 func_info_cnt;
+       __u32 nr_func_info;
+       __u32 nr_line_info;
+       __aligned_u64 line_info;
+       __aligned_u64 jited_line_info;
+       __u32 nr_jited_line_info;
+       __u32 line_info_rec_size;
+       __u32 jited_line_info_rec_size;
  } __attribute__((aligned(8)));
  
  struct bpf_map_info {
@@@ -2987,8 -3015,18 +3029,18 @@@ struct bpf_flow_keys 
  };
  
  struct bpf_func_info {
-       __u32   insn_offset;
+       __u32   insn_off;
        __u32   type_id;
  };
  
+ #define BPF_LINE_INFO_LINE_NUM(line_col)      ((line_col) >> 10)
+ #define BPF_LINE_INFO_LINE_COL(line_col)      ((line_col) & 0x3ff)
+ struct bpf_line_info {
+       __u32   insn_off;
+       __u32   file_name_off;
+       __u32   line_off;
+       __u32   line_col;
+ };
  #endif /* _UAPI__LINUX_BPF_H__ */
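
To make the new line_col packing concrete, a tiny userspace sketch
decoding it with the macros just added (line number in the upper 22
bits, column in the low 10):

    #include <stdint.h>
    #include <stdio.h>

    /* Same definitions as the UAPI hunk above. */
    #define BPF_LINE_INFO_LINE_NUM(line_col)  ((line_col) >> 10)
    #define BPF_LINE_INFO_LINE_COL(line_col)  ((line_col) & 0x3ff)

    int main(void)
    {
            uint32_t line_col = (42u << 10) | 7u;  /* line 42, column 7 */

            printf("line %u col %u\n",
                   BPF_LINE_INFO_LINE_NUM(line_col),
                   BPF_LINE_INFO_LINE_COL(line_col));
            return 0;
    }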
diff --combined kernel/bpf/btf.c
index 793acba40b4c26ea1a7a08e6819c8a89d1e1c6d5,e0a827f95e19b508ebf9133bb55128ccc5a40272..bf34933cc41356f9fe2bcd5a74f6f5403639d5e2
@@@ -444,7 -444,7 +444,7 @@@ static const struct btf_kind_operation
        return kind_ops[BTF_INFO_KIND(t->info)];
  }
  
- static bool btf_name_offset_valid(const struct btf *btf, u32 offset)
+ bool btf_name_offset_valid(const struct btf *btf, u32 offset)
  {
        return BTF_STR_OFFSET_VALID(offset) &&
                offset < btf->hdr.str_len;
@@@ -1195,22 -1195,6 +1195,22 @@@ static int btf_ref_type_check_meta(stru
                return -EINVAL;
        }
  
 +      /* typedef type must have a valid name, and other ref types,
 +       * volatile, const, restrict, should have a null name.
 +       */
 +      if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
 +              if (!t->name_off ||
 +                  !btf_name_valid_identifier(env->btf, t->name_off)) {
 +                      btf_verifier_log_type(env, t, "Invalid name");
 +                      return -EINVAL;
 +              }
 +      } else {
 +              if (t->name_off) {
 +                      btf_verifier_log_type(env, t, "Invalid name");
 +                      return -EINVAL;
 +              }
 +      }
 +
        btf_verifier_log_type(env, t, NULL);
  
        return 0;
@@@ -1369,13 -1353,6 +1369,13 @@@ static s32 btf_fwd_check_meta(struct bt
                return -EINVAL;
        }
  
 +      /* fwd type must have a valid name */
 +      if (!t->name_off ||
 +          !btf_name_valid_identifier(env->btf, t->name_off)) {
 +              btf_verifier_log_type(env, t, "Invalid name");
 +              return -EINVAL;
 +      }
 +
        btf_verifier_log_type(env, t, NULL);
  
        return 0;
@@@ -1432,12 -1409,6 +1432,12 @@@ static s32 btf_array_check_meta(struct 
                return -EINVAL;
        }
  
 +      /* array type should not have a name */
 +      if (t->name_off) {
 +              btf_verifier_log_type(env, t, "Invalid name");
 +              return -EINVAL;
 +      }
 +
        if (btf_type_vlen(t)) {
                btf_verifier_log_type(env, t, "vlen != 0");
                return -EINVAL;
@@@ -1614,13 -1585,6 +1614,13 @@@ static s32 btf_struct_check_meta(struc
                return -EINVAL;
        }
  
 +      /* struct type either no name or a valid one */
 +      if (t->name_off &&
 +          !btf_name_valid_identifier(env->btf, t->name_off)) {
 +              btf_verifier_log_type(env, t, "Invalid name");
 +              return -EINVAL;
 +      }
 +
        btf_verifier_log_type(env, t, NULL);
  
        last_offset = 0;
                        return -EINVAL;
                }
  
 +              /* struct member either no name or a valid one */
 +              if (member->name_off &&
 +                  !btf_name_valid_identifier(btf, member->name_off)) {
 +                      btf_verifier_log_member(env, t, member, "Invalid name");
 +                      return -EINVAL;
 +              }
                /* A member cannot be in type void */
                if (!member->type || !BTF_TYPE_ID_VALID(member->type)) {
                        btf_verifier_log_member(env, t, member,
@@@ -1825,13 -1783,6 +1825,13 @@@ static s32 btf_enum_check_meta(struct b
                return -EINVAL;
        }
  
 +      /* enum type either no name or a valid one */
 +      if (t->name_off &&
 +          !btf_name_valid_identifier(env->btf, t->name_off)) {
 +              btf_verifier_log_type(env, t, "Invalid name");
 +              return -EINVAL;
 +      }
 +
        btf_verifier_log_type(env, t, NULL);
  
        for (i = 0; i < nr_enums; i++) {
                        return -EINVAL;
                }
  
 +              /* enum member must have a valid name */
 +              if (!enums[i].name_off ||
 +                  !btf_name_valid_identifier(btf, enums[i].name_off)) {
 +                      btf_verifier_log_type(env, t, "Invalid name");
 +                      return -EINVAL;
 +              }
 +
                btf_verifier_log(env, "\t%s val=%d\n",
                                 btf_name_by_offset(btf, enums[i].name_off),
                                 enums[i].val);
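
A rough model of the naming rules the btf.c hunks above enforce.
btf_name_valid_identifier() itself is not part of this diff; the check
below assumes plain C-identifier rules and is only an approximation:

    #include <ctype.h>
    #include <stdbool.h>

    /* Approximate stand-in for btf_name_valid_identifier(). The hunks
     * above then require: typedefs, fwds and enum members must carry a
     * valid name; const/volatile/restrict refs and arrays must not be
     * named; structs, enums and struct members may be anonymous, but a
     * name, if present, must be valid.
     */
    static bool name_valid_identifier(const char *name)
    {
            if (!name || !*name)
                    return false;
            if (!isalpha((unsigned char)*name) && *name != '_')
                    return false;
            for (name++; *name; name++)
                    if (!isalnum((unsigned char)*name) && *name != '_')
                            return false;
            return true;
    }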
diff --combined kernel/bpf/verifier.c
index ee30effdf98acfdab9e9f2886e61437da3acdd82,2e70b813a1a7530c100877551a12df74c74cfa30..8b511a4fe84ad932d4ce7a34b5b0b1651aa55b5e
@@@ -177,7 -177,6 +177,7 @@@ struct bpf_verifier_stack_elem 
  
  #define BPF_COMPLEXITY_LIMIT_INSNS    131072
  #define BPF_COMPLEXITY_LIMIT_STACK    1024
 +#define BPF_COMPLEXITY_LIMIT_STATES   64
  
  #define BPF_MAP_PTR_UNPRIV    1UL
  #define BPF_MAP_PTR_POISON    ((void *)((0xeB9FUL << 1) +     \
@@@ -3584,12 -3583,15 +3584,15 @@@ static int check_alu_op(struct bpf_veri
                        return err;
  
                if (BPF_SRC(insn->code) == BPF_X) {
+                       struct bpf_reg_state *src_reg = regs + insn->src_reg;
+                       struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
                        if (BPF_CLASS(insn->code) == BPF_ALU64) {
                                /* case: R1 = R2
                                 * copy register state to dest reg
                                 */
-                               regs[insn->dst_reg] = regs[insn->src_reg];
-                               regs[insn->dst_reg].live |= REG_LIVE_WRITTEN;
+                               *dst_reg = *src_reg;
+                               dst_reg->live |= REG_LIVE_WRITTEN;
                        } else {
                                /* R1 = (u32) R2 */
                                if (is_pointer_value(env, insn->src_reg)) {
                                                "R%d partial copy of pointer\n",
                                                insn->src_reg);
                                        return -EACCES;
+                               } else if (src_reg->type == SCALAR_VALUE) {
+                                       *dst_reg = *src_reg;
+                                       dst_reg->live |= REG_LIVE_WRITTEN;
+                               } else {
+                                       mark_reg_unknown(env, regs,
+                                                        insn->dst_reg);
                                }
-                               mark_reg_unknown(env, regs, insn->dst_reg);
-                               coerce_reg_to_size(&regs[insn->dst_reg], 4);
+                               coerce_reg_to_size(dst_reg, 4);
                        }
                } else {
                        /* case: R = imm
                        return -EINVAL;
                }
  
-               if (opcode == BPF_ARSH && BPF_CLASS(insn->code) != BPF_ALU64) {
-                       verbose(env, "BPF_ARSH not supported for 32 bit ALU\n");
-                       return -EINVAL;
-               }
                if ((opcode == BPF_LSH || opcode == BPF_RSH ||
                     opcode == BPF_ARSH) && BPF_SRC(insn->code) == BPF_K) {
                        int size = BPF_CLASS(insn->code) == BPF_ALU64 ? 64 : 32;
@@@ -3765,79 -3767,6 +3768,79 @@@ static void find_good_pkt_pointers(stru
        }
  }
  
 +/* compute branch direction of the expression "if (reg opcode val) goto target;"
 + * and return:
 + *  1 - branch will be taken and "goto target" will be executed
 + *  0 - branch will not be taken and fall-through to next insn
 + * -1 - unknown. Example: "if (reg < 5)" is unknown when register value range [0,10]
 + */
 +static int is_branch_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 +{
 +      if (__is_pointer_value(false, reg))
 +              return -1;
 +
 +      switch (opcode) {
 +      case BPF_JEQ:
 +              if (tnum_is_const(reg->var_off))
 +                      return !!tnum_equals_const(reg->var_off, val);
 +              break;
 +      case BPF_JNE:
 +              if (tnum_is_const(reg->var_off))
 +                      return !tnum_equals_const(reg->var_off, val);
 +              break;
 +      case BPF_JGT:
 +              if (reg->umin_value > val)
 +                      return 1;
 +              else if (reg->umax_value <= val)
 +                      return 0;
 +              break;
 +      case BPF_JSGT:
 +              if (reg->smin_value > (s64)val)
 +                      return 1;
 +              else if (reg->smax_value < (s64)val)
 +                      return 0;
 +              break;
 +      case BPF_JLT:
 +              if (reg->umax_value < val)
 +                      return 1;
 +              else if (reg->umin_value >= val)
 +                      return 0;
 +              break;
 +      case BPF_JSLT:
 +              if (reg->smax_value < (s64)val)
 +                      return 1;
 +              else if (reg->smin_value >= (s64)val)
 +                      return 0;
 +              break;
 +      case BPF_JGE:
 +              if (reg->umin_value >= val)
 +                      return 1;
 +              else if (reg->umax_value < val)
 +                      return 0;
 +              break;
 +      case BPF_JSGE:
 +              if (reg->smin_value >= (s64)val)
 +                      return 1;
 +              else if (reg->smax_value < (s64)val)
 +                      return 0;
 +              break;
 +      case BPF_JLE:
 +              if (reg->umax_value <= val)
 +                      return 1;
 +              else if (reg->umin_value > val)
 +                      return 0;
 +              break;
 +      case BPF_JSLE:
 +              if (reg->smax_value <= (s64)val)
 +                      return 1;
 +              else if (reg->smin_value > (s64)val)
 +                      return 0;
 +              break;
 +      }
 +
 +      return -1;
 +}
 +
  /* Adjusts the register min/max values in the case that the dst_reg is the
   * variable register that we are working on, and src_reg is a constant or we're
   * simply doing a BPF_K check.
@@@ -4239,15 -4168,21 +4242,15 @@@ static int check_cond_jmp_op(struct bpf
  
        dst_reg = &regs[insn->dst_reg];
  
 -      /* detect if R == 0 where R was initialized to zero earlier */
 -      if (BPF_SRC(insn->code) == BPF_K &&
 -          (opcode == BPF_JEQ || opcode == BPF_JNE) &&
 -          dst_reg->type == SCALAR_VALUE &&
 -          tnum_is_const(dst_reg->var_off)) {
 -              if ((opcode == BPF_JEQ && dst_reg->var_off.value == insn->imm) ||
 -                  (opcode == BPF_JNE && dst_reg->var_off.value != insn->imm)) {
 -                      /* if (imm == imm) goto pc+off;
 -                       * only follow the goto, ignore fall-through
 -                       */
 +      if (BPF_SRC(insn->code) == BPF_K) {
 +              int pred = is_branch_taken(dst_reg, insn->imm, opcode);
 +
 +              if (pred == 1) {
 +                       /* only follow the goto, ignore fall-through */
                        *insn_idx += insn->off;
                        return 0;
 -              } else {
 -                      /* if (imm != imm) goto pc+off;
 -                       * only follow fall-through branch, since
 +              } else if (pred == 0) {
 +                      /* only follow fall-through branch, since
                         * that's where the program will go
                         */
                        return 0;
@@@ -4713,15 -4648,17 +4716,17 @@@ err_free
  #define MIN_BPF_FUNCINFO_SIZE 8
  #define MAX_FUNCINFO_REC_SIZE 252
  
- static int check_btf_func(struct bpf_prog *prog, struct bpf_verifier_env *env,
-                         union bpf_attr *attr, union bpf_attr __user *uattr)
+ static int check_btf_func(struct bpf_verifier_env *env,
+                         const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
  {
        u32 i, nfuncs, urec_size, min_size, prev_offset;
        u32 krec_size = sizeof(struct bpf_func_info);
-       struct bpf_func_info *krecord = NULL;
+       struct bpf_func_info *krecord;
        const struct btf_type *type;
+       struct bpf_prog *prog;
+       const struct btf *btf;
        void __user *urecord;
-       struct btf *btf;
        int ret = 0;
  
        nfuncs = attr->func_info_cnt;
                return -EINVAL;
        }
  
-       btf = btf_get_by_fd(attr->prog_btf_fd);
-       if (IS_ERR(btf)) {
-               verbose(env, "unable to get btf from fd\n");
-               return PTR_ERR(btf);
-       }
+       prog = env->prog;
+       btf = prog->aux->btf;
  
        urecord = u64_to_user_ptr(attr->func_info);
        min_size = min_t(u32, krec_size, urec_size);
  
        krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
-       if (!krecord) {
-               ret = -ENOMEM;
-               goto free_btf;
-       }
+       if (!krecord)
+               return -ENOMEM;
  
        for (i = 0; i < nfuncs; i++) {
                ret = bpf_check_uarg_tail_zero(urecord, krec_size, urec_size);
                                if (put_user(min_size, &uattr->func_info_rec_size))
                                        ret = -EFAULT;
                        }
-                       goto free_btf;
+                       goto err_free;
                }
  
                if (copy_from_user(&krecord[i], urecord, min_size)) {
                        ret = -EFAULT;
-                       goto free_btf;
+                       goto err_free;
                }
  
-               /* check insn_offset */
+               /* check insn_off */
                if (i == 0) {
-                       if (krecord[i].insn_offset) {
+                       if (krecord[i].insn_off) {
                                verbose(env,
-                                       "nonzero insn_offset %u for the first func info record",
-                                       krecord[i].insn_offset);
+                                       "nonzero insn_off %u for the first func info record",
+                                       krecord[i].insn_off);
                                ret = -EINVAL;
-                               goto free_btf;
+                               goto err_free;
                        }
-               } else if (krecord[i].insn_offset <= prev_offset) {
+               } else if (krecord[i].insn_off <= prev_offset) {
                        verbose(env,
                                "same or smaller insn offset (%u) than previous func info record (%u)",
-                               krecord[i].insn_offset, prev_offset);
+                               krecord[i].insn_off, prev_offset);
                        ret = -EINVAL;
-                       goto free_btf;
+                       goto err_free;
                }
  
-               if (env->subprog_info[i].start != krecord[i].insn_offset) {
+               if (env->subprog_info[i].start != krecord[i].insn_off) {
                        verbose(env, "func_info BTF section doesn't match subprog layout in BPF program\n");
                        ret = -EINVAL;
-                       goto free_btf;
+                       goto err_free;
                }
  
                /* check type_id */
                        verbose(env, "invalid type id %d in func info",
                                krecord[i].type_id);
                        ret = -EINVAL;
-                       goto free_btf;
+                       goto err_free;
                }
  
-               prev_offset = krecord[i].insn_offset;
+               prev_offset = krecord[i].insn_off;
                urecord += urec_size;
        }
  
-       prog->aux->btf = btf;
        prog->aux->func_info = krecord;
        prog->aux->func_info_cnt = nfuncs;
        return 0;
  
- free_btf:
-       btf_put(btf);
+ err_free:
        kvfree(krecord);
        return ret;
  }
@@@ -4830,7 -4760,151 +4828,151 @@@ static void adjust_btf_func(struct bpf_
                return;
  
        for (i = 0; i < env->subprog_cnt; i++)
-               env->prog->aux->func_info[i].insn_offset = env->subprog_info[i].start;
+               env->prog->aux->func_info[i].insn_off = env->subprog_info[i].start;
+ }
+
+ #define MIN_BPF_LINEINFO_SIZE (offsetof(struct bpf_line_info, line_col) + \
+               sizeof(((struct bpf_line_info *)(0))->line_col))
+ #define MAX_LINEINFO_REC_SIZE MAX_FUNCINFO_REC_SIZE
+
+ static int check_btf_line(struct bpf_verifier_env *env,
+                         const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
+ {
+       u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
+       struct bpf_subprog_info *sub;
+       struct bpf_line_info *linfo;
+       struct bpf_prog *prog;
+       const struct btf *btf;
+       void __user *ulinfo;
+       int err;
+       nr_linfo = attr->line_info_cnt;
+       if (!nr_linfo)
+               return 0;
+       rec_size = attr->line_info_rec_size;
+       if (rec_size < MIN_BPF_LINEINFO_SIZE ||
+           rec_size > MAX_LINEINFO_REC_SIZE ||
+           rec_size & (sizeof(u32) - 1))
+               return -EINVAL;
+       /* Need to zero it in case userspace passes in a smaller
+        * bpf_line_info object.
+        */
+       linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
+                        GFP_KERNEL | __GFP_NOWARN);
+       if (!linfo)
+               return -ENOMEM;
+       prog = env->prog;
+       btf = prog->aux->btf;
+       s = 0;
+       sub = env->subprog_info;
+       ulinfo = u64_to_user_ptr(attr->line_info);
+       expected_size = sizeof(struct bpf_line_info);
+       ncopy = min_t(u32, expected_size, rec_size);
+       for (i = 0; i < nr_linfo; i++) {
+               err = bpf_check_uarg_tail_zero(ulinfo, expected_size, rec_size);
+               if (err) {
+                       if (err == -E2BIG) {
+                               verbose(env, "nonzero tailing record in line_info");
+                               if (put_user(expected_size,
+                                            &uattr->line_info_rec_size))
+                                       err = -EFAULT;
+                       }
+                       goto err_free;
+               }
+               if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
+                       err = -EFAULT;
+                       goto err_free;
+               }
+               /*
+                * Check insn_off to ensure
+                * 1) strictly increasing AND
+                * 2) bounded by prog->len
+                *
+                * The linfo[0].insn_off == 0 check logically falls into
+                * the later "missing bpf_line_info for func..." case
+                * because the first linfo[0].insn_off must also be the
+                * start of the first subprog, and the first subprog must
+                * have subprog_info[0].start == 0.
+                */
+               if ((i && linfo[i].insn_off <= prev_offset) ||
+                   linfo[i].insn_off >= prog->len) {
+                       verbose(env, "Invalid line_info[%u].insn_off:%u (prev_offset:%u prog->len:%u)\n",
+                               i, linfo[i].insn_off, prev_offset,
+                               prog->len);
+                       err = -EINVAL;
+                       goto err_free;
+               }
+               if (!btf_name_offset_valid(btf, linfo[i].line_off) ||
+                   !btf_name_offset_valid(btf, linfo[i].file_name_off)) {
+                       verbose(env, "Invalid line_info[%u].line_off or .file_name_off\n", i);
+                       err = -EINVAL;
+                       goto err_free;
+               }
+               if (s != env->subprog_cnt) {
+                       if (linfo[i].insn_off == sub[s].start) {
+                               sub[s].linfo_idx = i;
+                               s++;
+                       } else if (sub[s].start < linfo[i].insn_off) {
+                               verbose(env, "missing bpf_line_info for func#%u\n", s);
+                               err = -EINVAL;
+                               goto err_free;
+                       }
+               }
+               prev_offset = linfo[i].insn_off;
+               ulinfo += rec_size;
+       }
+       if (s != env->subprog_cnt) {
+               verbose(env, "missing bpf_line_info for %u funcs starting from func#%u\n",
+                       env->subprog_cnt - s, s);
+               err = -EINVAL;
+               goto err_free;
+       }
+       prog->aux->linfo = linfo;
+       prog->aux->nr_linfo = nr_linfo;
+       return 0;
+ err_free:
+       kvfree(linfo);
+       return err;
+ }
+
+ static int check_btf_info(struct bpf_verifier_env *env,
+                         const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
+ {
+       struct btf *btf;
+       int err;
+       if (!attr->func_info_cnt && !attr->line_info_cnt)
+               return 0;
+       btf = btf_get_by_fd(attr->prog_btf_fd);
+       if (IS_ERR(btf))
+               return PTR_ERR(btf);
+       env->prog->aux->btf = btf;
+       err = check_btf_func(env, attr, uattr);
+       if (err)
+               return err;
+       err = check_btf_line(env, attr, uattr);
+       if (err)
+               return err;
+       return 0;
  }
  
  /* check %cur's range satisfies %old's */
@@@ -5185,7 -5259,7 +5327,7 @@@ static int is_state_visited(struct bpf_
        struct bpf_verifier_state_list *new_sl;
        struct bpf_verifier_state_list *sl;
        struct bpf_verifier_state *cur = env->cur_state, *new;
 -      int i, j, err;
 +      int i, j, err, states_cnt = 0;
  
        sl = env->explored_states[insn_idx];
        if (!sl)
                        return 1;
                }
                sl = sl->next;
 +              states_cnt++;
        }
  
 +      if (!env->allow_ptr_leaks && states_cnt > BPF_COMPLEXITY_LIMIT_STATES)
 +              return 0;
 +
        /* there were no equivalent states, remember current one.
         * technically the current state is not proven to be safe yet,
         * but it will either reach outer most bpf_exit (which means it's safe)
@@@ -5357,9 -5427,6 +5499,9 @@@ static int do_check(struct bpf_verifier
                        goto process_bpf_exit;
                }
  
 +              if (signal_pending(current))
 +                      return -EAGAIN;
 +
                if (need_resched())
                        cond_resched();
  
@@@ -6084,7 -6151,7 +6226,7 @@@ static int jit_subprogs(struct bpf_veri
        int i, j, subprog_start, subprog_end = 0, len, subprog;
        struct bpf_insn *insn;
        void *old_bpf_func;
-       int err = -ENOMEM;
+       int err;
  
        if (env->subprog_cnt <= 1)
                return 0;
                insn->imm = 1;
        }
  
+       err = bpf_prog_alloc_jited_linfo(prog);
+       if (err)
+               goto out_undo_insn;
+       err = -ENOMEM;
        func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
        if (!func)
                goto out_undo_insn;
                func[i]->aux->name[0] = 'F';
                func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
                func[i]->jit_requested = 1;
+               func[i]->aux->linfo = prog->aux->linfo;
+               func[i]->aux->nr_linfo = prog->aux->nr_linfo;
+               func[i]->aux->jited_linfo = prog->aux->jited_linfo;
+               func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
                func[i] = bpf_int_jit_compile(func[i]);
                if (!func[i]->jited) {
                        err = -ENOTSUPP;
        prog->bpf_func = func[0]->bpf_func;
        prog->aux->func = func;
        prog->aux->func_cnt = env->subprog_cnt;
+       bpf_prog_free_unused_jited_linfo(prog);
        return 0;
  out_free:
        for (i = 0; i < env->subprog_cnt; i++)
@@@ -6234,6 -6311,7 +6386,7 @@@ out_undo_insn
                insn->off = 0;
                insn->imm = env->insn_aux_data[i].call_imm;
        }
+       bpf_prog_free_jited_linfo(prog);
        return err;
  }
  
@@@ -6580,6 -6658,8 +6733,8 @@@ int bpf_check(struct bpf_prog **prog, u
        env->strict_alignment = !!(attr->prog_flags & BPF_F_STRICT_ALIGNMENT);
        if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
                env->strict_alignment = true;
+       if (attr->prog_flags & BPF_F_ANY_ALIGNMENT)
+               env->strict_alignment = false;
  
        ret = replace_map_fd_with_map_ptr(env);
        if (ret < 0)
        if (ret < 0)
                goto skip_full_check;
  
-       ret = check_btf_func(env->prog, env, attr, uattr);
+       ret = check_btf_info(env, attr, uattr);
        if (ret < 0)
                goto skip_full_check;
  
diff --combined net/bpf/test_run.c
index 25001913d03b599dde50e85a20de61156465359b,7663e6a57280b19fb9f213d019e2dc8bbe77c2b8..fa2644d276ef1134bc3b41ce02b70bfdd8a678fa
@@@ -28,13 -28,12 +28,13 @@@ static __always_inline u32 bpf_test_run
        return ret;
  }
  
 -static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
 +static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
 +                      u32 *time)
  {
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
        enum bpf_cgroup_storage_type stype;
        u64 time_start, time_spent = 0;
 -      u32 ret = 0, i;
 +      u32 i;
  
        for_each_cgroup_storage_type(stype) {
                storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
@@@ -50,7 -49,7 +50,7 @@@
                repeat = 1;
        time_start = ktime_get_ns();
        for (i = 0; i < repeat; i++) {
 -              ret = bpf_test_run_one(prog, ctx, storage);
 +              *ret = bpf_test_run_one(prog, ctx, storage);
                if (need_resched()) {
                        if (signal_pending(current))
                                break;
@@@ -66,7 -65,7 +66,7 @@@
        for_each_cgroup_storage_type(stype)
                bpf_cgroup_storage_free(storage[stype]);
  
 -      return ret;
 +      return 0;
  }
  
  static int bpf_test_finish(const union bpf_attr *kattr,
  {
        void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
        int err = -EFAULT;
+       u32 copy_size = size;
  
-       if (data_out && copy_to_user(data_out, data, size))
+       /* Clamp copy if the user has provided a size hint, but copy the full
+        * buffer if not to retain old behaviour.
+        */
+       if (kattr->test.data_size_out &&
+           copy_size > kattr->test.data_size_out) {
+               copy_size = kattr->test.data_size_out;
+               err = -ENOSPC;
+       }
+       if (data_out && copy_to_user(data_out, data, copy_size))
                goto out;
        if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
                goto out;
@@@ -84,7 -93,8 +94,8 @@@
                goto out;
        if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
                goto out;
-       err = 0;
+       if (err != -ENOSPC)
+               err = 0;
  out:
        return err;
  }
@@@ -166,12 -176,7 +177,12 @@@ int bpf_prog_test_run_skb(struct bpf_pr
                __skb_push(skb, hh_len);
        if (is_direct_pkt_access)
                bpf_compute_data_pointers(skb);
 -      retval = bpf_test_run(prog, skb, repeat, &duration);
 +      ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
 +      if (ret) {
 +              kfree_skb(skb);
 +              kfree(sk);
 +              return ret;
 +      }
        if (!is_l2) {
                if (skb_headroom(skb) < hh_len) {
                        int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
@@@ -218,14 -223,11 +229,14 @@@ int bpf_prog_test_run_xdp(struct bpf_pr
        rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
        xdp.rxq = &rxqueue->xdp_rxq;
  
 -      retval = bpf_test_run(prog, &xdp, repeat, &duration);
 +      ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
 +      if (ret)
 +              goto out;
        if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
            xdp.data_end != xdp.data + size)
                size = xdp.data_end - xdp.data;
        ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
 +out:
        kfree(data);
        return ret;
  }
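
For completeness, a hedged sketch of exercising the new ENOSPC path
above via libbpf's bpf_prog_test_run_xattr() (field names as introduced
in this pull request; treat the exact layout as indicative):

    #include <bpf/bpf.h>
    #include <stdio.h>

    /* Run a loaded program once, capturing output data. With
     * data_size_out set on input, a too-small buffer now yields
     * -ENOSPC instead of silently overflowing.
     */
    static int run_once(int prog_fd, void *pkt, __u32 pkt_len,
                        void *out, __u32 out_len)
    {
            struct bpf_prog_test_run_attr tattr = {
                    .prog_fd = prog_fd,
                    .repeat = 1,
                    .data_in = pkt,
                    .data_size_in = pkt_len,
                    .data_out = out,
                    .data_size_out = out_len,
            };
            int err = bpf_prog_test_run_xattr(&tattr);

            if (!err)
                    printf("retval %u, %u bytes out\n",
                           tattr.retval, tattr.data_size_out);
            return err;
    }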
diff --combined net/core/filter.c
index 8659b40172d18705a6445bfaace622938a0990ff,3d54af4c363d8fb6988313a445a060938b0ab478..f9348806e843ec488c17131b00b45955416d3c0f
@@@ -5070,8 -5070,7 +5070,8 @@@ __bpf_sk_lookup(struct sk_buff *skb, st
        int sdif;
  
        family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
 -      if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags))
 +      if (unlikely(family == AF_UNSPEC || flags ||
 +                   !((s32)netns_id < 0 || netns_id <= S32_MAX)))
                goto out;
  
        if (family == AF_INET)
        else
                sdif = inet6_sdif(skb);
  
 -      if (netns_id) {
 +      if ((s32)netns_id < 0) {
 +              net = caller_net;
 +              sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
 +      } else {
                net = get_net_ns_by_id(caller_net, netns_id);
                if (unlikely(!net))
                        goto out;
                sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
                put_net(net);
 -      } else {
 -              net = caller_net;
 -              sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
        }
  
        if (sk)
@@@ -5738,8 -5737,8 +5738,8 @@@ static bool bpf_skb_is_valid_access(in
                if (size != size_default)
                        return false;
                break;
 -      case bpf_ctx_range(struct __sk_buff, flow_keys):
 -              if (size != sizeof(struct bpf_flow_keys *))
 +      case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
 +              if (size != sizeof(__u64))
                        return false;
                break;
        case bpf_ctx_range(struct __sk_buff, tstamp):
@@@ -5771,9 -5770,10 +5771,10 @@@ static bool sk_filter_is_valid_access(i
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_meta):
        case bpf_ctx_range(struct __sk_buff, data_end):
 -      case bpf_ctx_range(struct __sk_buff, flow_keys):
 +      case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
        case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
  
@@@ -5797,7 -5797,8 +5798,8 @@@ static bool cg_skb_is_valid_access(int 
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
 -      case bpf_ctx_range(struct __sk_buff, flow_keys):
 +      case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        case bpf_ctx_range(struct __sk_buff, data):
        case bpf_ctx_range(struct __sk_buff, data_end):
@@@ -5842,8 -5843,9 +5844,9 @@@ static bool lwt_is_valid_access(int off
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
        case bpf_ctx_range(struct __sk_buff, data_meta):
 -      case bpf_ctx_range(struct __sk_buff, flow_keys):
 +      case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
  
@@@ -6070,7 -6072,7 +6073,7 @@@ static bool tc_cls_act_is_valid_access(
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
 -      case bpf_ctx_range(struct __sk_buff, flow_keys):
 +      case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
                return false;
        }
@@@ -6272,8 -6274,9 +6275,9 @@@ static bool sk_skb_is_valid_access(int 
        switch (off) {
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
 -      case bpf_ctx_range(struct __sk_buff, flow_keys):
 +      case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
        case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
  
@@@ -6354,13 -6357,14 +6358,14 @@@ static bool flow_dissector_is_valid_acc
        case bpf_ctx_range(struct __sk_buff, data_end):
                info->reg_type = PTR_TO_PACKET_END;
                break;
 -      case bpf_ctx_range(struct __sk_buff, flow_keys):
 +      case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
                info->reg_type = PTR_TO_FLOW_KEYS;
                break;
        case bpf_ctx_range(struct __sk_buff, tc_classid):
        case bpf_ctx_range(struct __sk_buff, data_meta):
        case bpf_ctx_range_till(struct __sk_buff, family, local_port):
        case bpf_ctx_range(struct __sk_buff, tstamp):
+       case bpf_ctx_range(struct __sk_buff, wire_len):
                return false;
        }
  
@@@ -6686,6 -6690,17 +6691,17 @@@ static u32 bpf_convert_ctx_access(enum 
                                              bpf_target_off(struct sk_buff,
                                                             tstamp, 8,
                                                             target_size));
+               break;
+       case offsetof(struct __sk_buff, wire_len):
+               BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
+               off = si->off;
+               off -= offsetof(struct __sk_buff, wire_len);
+               off += offsetof(struct sk_buff, cb);
+               off += offsetof(struct qdisc_skb_cb, pkt_len);
+               *target_size = 4;
+               *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
        }
  
        return insn - insn_buf;
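
The rewrite above maps reads of __sk_buff->wire_len onto qdisc_skb_cb->pkt_len stored in skb->cb, so programs see the length used for qdisc accounting rather than skb->len. A hedged sketch of a tc classifier using the new field (the threshold and section name are illustrative):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>

#ifndef SEC
#define SEC(name) __attribute__((section(name), used))
#endif

SEC("classifier")
int drop_big_on_wire(struct __sk_buff *skb)
{
	/* wire_len reflects qdisc accounting (qdisc_skb_cb->pkt_len),
	 * which can differ from skb->len, e.g. for GSO packets
	 */
	if (skb->wire_len > 1500)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
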
index 0f1df9019dfc0b92fee75eeb80de1cb3d5e24447,dbbf6ece676022dcb1c8e61ab187a0ad1c2c3b5e..2392ccdc918faceacfc7822be0e09eaab575d28a
@@@ -32,7 -32,7 +32,7 @@@ static void btf_dumper_ptr(const void *
  }
  
  static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id,
 -                             const void *data)
 +                             __u8 bit_offset, const void *data)
  {
        int actual_type_id;
  
@@@ -40,7 -40,7 +40,7 @@@
        if (actual_type_id < 0)
                return actual_type_id;
  
 -      return btf_dumper_do_type(d, actual_type_id, 0, data);
 +      return btf_dumper_do_type(d, actual_type_id, bit_offset, data);
  }
  
  static void btf_dumper_enum(const void *data, json_writer_t *jw)
@@@ -237,7 -237,7 +237,7 @@@ static int btf_dumper_do_type(const str
        case BTF_KIND_VOLATILE:
        case BTF_KIND_CONST:
        case BTF_KIND_RESTRICT:
 -              return btf_dumper_modifier(d, type_id, data);
 +              return btf_dumper_modifier(d, type_id, bit_offset, data);
        default:
                jsonw_printf(d->jw, "(unsupported-kind");
                return -EINVAL;
@@@ -385,3 -385,67 +385,67 @@@ void btf_dumper_type_only(const struct 
        if (err < 0)
                func_sig[0] = '\0';
  }
+ 
+ static const char *ltrim(const char *s)
+ {
+       while (isspace(*s))
+               s++;
+       return s;
+ }
+ 
+ void btf_dump_linfo_plain(const struct btf *btf,
+                         const struct bpf_line_info *linfo,
+                         const char *prefix, bool linum)
+ {
+       const char *line = btf__name_by_offset(btf, linfo->line_off);
+       if (!line)
+               return;
+       line = ltrim(line);
+       if (!prefix)
+               prefix = "";
+       if (linum) {
+               const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+               /* Be more forgiving on the file name: the linum option is
+                * expected to provide more info than the already available
+                * source line.
+                */
+               if (!file)
+                       file = "";
+               printf("%s%s [file:%s line_num:%u line_col:%u]\n",
+                      prefix, line, file,
+                      BPF_LINE_INFO_LINE_NUM(linfo->line_col),
+                      BPF_LINE_INFO_LINE_COL(linfo->line_col));
+       } else {
+               printf("%s%s\n", prefix, line);
+       }
+ }
+ 
+ void btf_dump_linfo_json(const struct btf *btf,
+                        const struct bpf_line_info *linfo, bool linum)
+ {
+       const char *line = btf__name_by_offset(btf, linfo->line_off);
+       if (line)
+               jsonw_string_field(json_wtr, "src", ltrim(line));
+       if (linum) {
+               const char *file = btf__name_by_offset(btf, linfo->file_name_off);
+               if (file)
+                       jsonw_string_field(json_wtr, "file", file);
+               if (BPF_LINE_INFO_LINE_NUM(linfo->line_col))
+                       jsonw_int_field(json_wtr, "line_num",
+                                       BPF_LINE_INFO_LINE_NUM(linfo->line_col));
+               if (BPF_LINE_INFO_LINE_COL(linfo->line_col))
+                       jsonw_int_field(json_wtr, "line_col",
+                                       BPF_LINE_INFO_LINE_COL(linfo->line_col));
+       }
+ }
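
A hedged sketch of how a caller might walk the records these two dumpers consume; the record array, count and rec_size are assumed to have been fetched via bpf_obj_get_info_by_fd(), and the "; " prefix is illustrative:

static void dump_all_lines(const struct btf *btf, const void *linfo,
			   __u32 cnt, __u32 rec_size, bool linum)
{
	const char *rec = linfo;
	__u32 i;

	for (i = 0; i < cnt; i++) {
		btf_dump_linfo_plain(btf, (const struct bpf_line_info *)rec,
				     "; ", linum);
		/* step by the kernel-reported rec_size, not by
		 * sizeof(struct bpf_line_info), so a newer kernel's larger
		 * records still walk correctly
		 */
		rec += rec_size;
	}
}
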
index ec8b40ff386e3ac8cecbaf9ce6ef611228485b13,620ee1f919cf2a077e7d242105bff50e5e399ec1..94c002584068b5b664a9d3f6e4557c0df4faefd2
@@@ -232,6 -232,20 +232,20 @@@ enum bpf_attach_type 
   */
  #define BPF_F_STRICT_ALIGNMENT        (1U << 0)
  
+ /* If BPF_F_ANY_ALIGNMENT is used in the BPF_PROG_LOAD command, the
+  * verifier will allow any alignment whatsoever.  On platforms
+  * with strict alignment requirements for loads and stores (such
+  * as sparc and mips) the verifier normally validates that all
+  * loads and stores provably follow this requirement.  This flag
+  * turns that checking and enforcement off.
+  *
+  * It is mostly used for testing, when we want to validate the
+  * context and memory access aspects of the verifier but an
+  * unaligned access would trigger the alignment check before the
+  * check we are actually interested in.
+  */
+ #define BPF_F_ANY_ALIGNMENT   (1U << 1)
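
A minimal sketch of passing the new flag through the raw bpf(2) syscall; the prog type, instructions and license handling are placeholders:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int load_any_alignment(const struct bpf_insn *insns, __u32 insn_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.prog_flags = BPF_F_ANY_ALIGNMENT;	/* disable alignment checks */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
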
  /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
  #define BPF_PSEUDO_MAP_FD     1
  
@@@ -342,6 -356,9 +356,9 @@@ union bpf_attr 
                __u32           func_info_rec_size;     /* userspace bpf_func_info size */
                __aligned_u64   func_info;      /* func info */
                __u32           func_info_cnt;  /* number of bpf_func_info records */
+               __u32           line_info_rec_size;     /* userspace bpf_line_info size */
+               __aligned_u64   line_info;      /* line info */
+               __u32           line_info_cnt;  /* number of bpf_line_info records */
        };
  
        struct { /* anonymous struct used by BPF_OBJ_* commands */
        struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
                __u32           prog_fd;
                __u32           retval;
-               __u32           data_size_in;
-               __u32           data_size_out;
+               __u32           data_size_in;   /* input: len of data_in */
+               __u32           data_size_out;  /* input/output: len of data_out;
+                                                *   returns ENOSPC if data_out
+                                                *   is too small.
+                                                */
                __aligned_u64   data_in;
                __aligned_u64   data_out;
                __u32           repeat;
   *    Return
   *            0 on success, or a negative error in case of failure.
   *
 - * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
 + * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
   *    Description
   *            Look for TCP socket matching *tuple*, optionally in a child
   *            network namespace *netns*. The return value must be checked,
   *            **sizeof**\ (*tuple*\ **->ipv6**)
   *                    Look for an IPv6 socket.
   *
 - *            If the *netns* is zero, then the socket lookup table in the
 - *            netns associated with the *ctx* will be used. For the TC hooks,
 - *            this in the netns of the device in the skb. For socket hooks,
 - *            this in the netns of the socket. If *netns* is non-zero, then
 - *            it specifies the ID of the netns relative to the netns
 - *            associated with the *ctx*.
 + *            If the *netns* is a negative signed 32-bit integer, then the
 + *            socket lookup table in the netns associated with the *ctx*
 + *            will be used. For the TC hooks, this is the netns of the device
 + *            in the skb. For socket hooks, this is the netns of the socket.
 + *            If *netns* is any other signed 32-bit value greater than or
 + *            equal to zero then it specifies the ID of the netns relative to
 + *            the netns associated with the *ctx*. *netns* values beyond the
 + *            range of 32-bit integers are reserved for future use.
   *
   *            All values for *flags* are reserved for future usage, and must
   *            be left at zero.
   *            **CONFIG_NET** configuration option.
   *    Return
   *            Pointer to *struct bpf_sock*, or NULL in case of failure.
 - *            For sockets with reuseport option, *struct bpf_sock*
 - *            return is from reuse->socks[] using hash of the packet.
 + *            For sockets with reuseport option, the *struct bpf_sock*
 + *            result is from reuse->socks[] using the hash of the tuple.
   *
 - * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u32 netns, u64 flags)
 + * struct bpf_sock *bpf_sk_lookup_udp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
   *    Description
   *            Look for UDP socket matching *tuple*, optionally in a child
   *            network namespace *netns*. The return value must be checked,
   *            **sizeof**\ (*tuple*\ **->ipv6**)
   *                    Look for an IPv6 socket.
   *
 - *            If the *netns* is zero, then the socket lookup table in the
 - *            netns associated with the *ctx* will be used. For the TC hooks,
 - *            this in the netns of the device in the skb. For socket hooks,
 - *            this in the netns of the socket. If *netns* is non-zero, then
 - *            it specifies the ID of the netns relative to the netns
 - *            associated with the *ctx*.
 + *            If the *netns* is a negative signed 32-bit integer, then the
 + *            socket lookup table in the netns associated with the *ctx*
 + *            will be used. For the TC hooks, this is the netns of the device
 + *            in the skb. For socket hooks, this is the netns of the socket.
 + *            If *netns* is any other signed 32-bit value greater than or
 + *            equal to zero then it specifies the ID of the netns relative to
 + *            the netns associated with the *ctx*. *netns* values beyond the
 + *            range of 32-bit integers are reserved for future use.
   *
   *            All values for *flags* are reserved for future usage, and must
   *            be left at zero.
   *            **CONFIG_NET** configuration option.
   *    Return
   *            Pointer to *struct bpf_sock*, or NULL in case of failure.
 - *            For sockets with reuseport option, *struct bpf_sock*
 - *            return is from reuse->socks[] using hash of the packet.
 + *            For sockets with reuseport option, the *struct bpf_sock*
 + *            result is from reuse->socks[] using the hash of the tuple.
   *
   * int bpf_sk_release(struct bpf_sock *sk)
   *    Description
  *            if possible. Other errors can occur if the input parameters
  *            are invalid, either due to the *start* byte not being a valid
  *            part of the msg payload and/or the *pop* value being too large.
+  *    Return
+  *            0 on success, or a negative error in case of failure.
+  *
+  * int bpf_rc_pointer_rel(void *ctx, s32 rel_x, s32 rel_y)
+  *    Description
+  *            This helper is used in programs implementing IR decoding, to
+  *            report a successfully decoded pointer movement.
+  *
+  *            The *ctx* should point to the lirc sample as passed into
+  *            the program.
   *
+  *            This helper is only available if the kernel was compiled with
+  *            the **CONFIG_BPF_LIRC_MODE2** configuration option set to
+  *            "**y**".
   *    Return
-  *            0 on success, or a negative erro in case of failure.
+  *            0
   */
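
A hedged sketch of an IR decoder using the new helper; the sample-to-delta decoding is a hypothetical protocol, only the helper call and program section follow the documentation above:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("lirc_mode2")
int ir_mouse(unsigned int *sample)
{
	unsigned int s = *sample;
	/* hypothetical encoding: low byte = dx, next byte = dy,
	 * both signed 8-bit deltas
	 */
	int dx = (signed char)(s & 0xff);
	int dy = (signed char)((s >> 8) & 0xff);

	bpf_rc_pointer_rel(sample, dx, dy);	/* always returns 0 */
	return 0;
}

char _license[] SEC("license") = "GPL";
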
  #define __BPF_FUNC_MAPPER(FN)         \
        FN(unspec),                     \
        FN(map_pop_elem),               \
        FN(map_peek_elem),              \
        FN(msg_push_data),              \
-       FN(msg_pop_data),
+       FN(msg_pop_data),               \
+       FN(rc_pointer_rel),
  
  /* integer value in 'imm' field of BPF_CALL instruction selects which helper
   * function eBPF program intends to call
@@@ -2434,9 -2464,6 +2468,9 @@@ enum bpf_func_id 
  /* BPF_FUNC_perf_event_output for sk_buff input context. */
  #define BPF_F_CTXLEN_MASK             (0xfffffULL << 32)
  
 +/* Current network namespace */
 +#define BPF_F_CURRENT_NETNS           (-1L)
 +
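
With the signed convention above, a program no longer needs to know its own netns ID; passing BPF_F_CURRENT_NETNS selects the caller's netns. A hedged sketch from a tc context (the zeroed tuple is a placeholder for fields parsed from the packet):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

SEC("classifier")
int lookup_in_own_netns(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};	/* fill from the packet in real use */
	struct bpf_sock *sk;

	/* a negative netns parameter selects the caller's own netns */
	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (sk)
		bpf_sk_release(sk);	/* acquired references must be released */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
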
  /* Mode for BPF_FUNC_skb_adjust_room helper. */
  enum bpf_adj_room_mode {
        BPF_ADJ_ROOM_NET,
@@@ -2454,12 -2481,6 +2488,12 @@@ enum bpf_lwt_encap_mode 
        BPF_LWT_ENCAP_SEG6_INLINE
  };
  
 +#define __bpf_md_ptr(type, name)      \
 +union {                                       \
 +      type name;                      \
 +      __u64 :64;                      \
 +} __attribute__((aligned(8)))
 +
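
The union reserves a full 8 bytes for the pointer member on every ABI, so 32-bit and 64-bit userspace see the same struct layout. A quick compile-time illustration (the struct and field names are made up):

#include <linux/bpf.h>	/* __bpf_md_ptr() after this change */
#include <linux/types.h>

struct md_example {
	__bpf_md_ptr(void *, data);	/* slot is 8 bytes even on 32-bit */
	__u32 len;
};

/* 8-byte slot + 4-byte field, padded to 16 by aligned(8) on any ABI */
_Static_assert(sizeof(struct md_example) == 16, "stable pointer slot");
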
  /* user accessible mirror of in-kernel sk_buff.
   * new fields can only be added to the end of this structure
   */
@@@ -2494,8 -2515,9 +2528,9 @@@ struct __sk_buff 
        /* ... here. */
  
        __u32 data_meta;
 -      struct bpf_flow_keys *flow_keys;
 +      __bpf_md_ptr(struct bpf_flow_keys *, flow_keys);
        __u64 tstamp;
+       __u32 wire_len;
  };
  
  struct bpf_tunnel_key {
@@@ -2611,8 -2633,8 +2646,8 @@@ enum sk_action 
   * be added to the end of this structure
   */
  struct sk_msg_md {
 -      void *data;
 -      void *data_end;
 +      __bpf_md_ptr(void *, data);
 +      __bpf_md_ptr(void *, data_end);
  
        __u32 family;
        __u32 remote_ip4;       /* Stored in network byte order */
@@@ -2628,9 -2650,8 +2663,9 @@@ struct sk_reuseport_md 
         * Start of directly accessible data. It begins from
         * the tcp/udp header.
         */
 -      void *data;
 -      void *data_end;         /* End of directly accessible data */
 +      __bpf_md_ptr(void *, data);
 +      /* End of directly accessible data */
 +      __bpf_md_ptr(void *, data_end);
        /*
         * Total length of packet (starting from the tcp/udp header).
         * Note that the directly accessible bytes (data_end - data)
@@@ -2674,7 -2695,13 +2709,13 @@@ struct bpf_prog_info 
        __u32 btf_id;
        __u32 func_info_rec_size;
        __aligned_u64 func_info;
-       __u32 func_info_cnt;
+       __u32 nr_func_info;
+       __u32 nr_line_info;
+       __aligned_u64 line_info;
+       __aligned_u64 jited_line_info;
+       __u32 nr_jited_line_info;
+       __u32 line_info_rec_size;
+       __u32 jited_line_info_rec_size;
  } __attribute__((aligned(8)));
  
  struct bpf_map_info {
@@@ -2987,8 -3014,18 +3028,18 @@@ struct bpf_flow_keys 
  };
  
  struct bpf_func_info {
-       __u32   insn_offset;
+       __u32   insn_off;
        __u32   type_id;
  };
  
+ #define BPF_LINE_INFO_LINE_NUM(line_col)      ((line_col) >> 10)
+ #define BPF_LINE_INFO_LINE_COL(line_col)      ((line_col) & 0x3ff)
+ 
+ struct bpf_line_info {
+       __u32   insn_off;
+       __u32   file_name_off;
+       __u32   line_off;
+       __u32   line_col;
+ };
  #endif /* _UAPI__LINUX_BPF_H__ */
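
line_col packs both values into one __u32: the upper 22 bits carry the line number and the low 10 bits the column, matching the two macros above. A small hedged decoder:

#include <stdio.h>
#include <linux/bpf.h>	/* struct bpf_line_info + LINE_NUM/LINE_COL macros */

static void print_linfo(const struct bpf_line_info *li)
{
	printf("insn %u -> line %u col %u\n", li->insn_off,
	       BPF_LINE_INFO_LINE_NUM(li->line_col),
	       BPF_LINE_INFO_LINE_COL(li->line_col));
}
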
index d3eef9d8210021729d91608e2dccada5c0dc2932,04c060e8f10a5c794b17e92b479f036c04bd98cf..6c77cf7bedce84fac91e68858d818ad2063fafeb
@@@ -156,12 -156,12 +156,12 @@@ static unsigned long long (*bpf_skb_anc
        (void *) BPF_FUNC_skb_ancestor_cgroup_id;
  static struct bpf_sock *(*bpf_sk_lookup_tcp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
 -                                           int size, unsigned int netns_id,
 +                                           int size, unsigned long long netns_id,
                                             unsigned long long flags) =
        (void *) BPF_FUNC_sk_lookup_tcp;
  static struct bpf_sock *(*bpf_sk_lookup_udp)(void *ctx,
                                             struct bpf_sock_tuple *tuple,
 -                                           int size, unsigned int netns_id,
 +                                           int size, unsigned long long netns_id,
                                             unsigned long long flags) =
        (void *) BPF_FUNC_sk_lookup_udp;
  static int (*bpf_sk_release)(struct bpf_sock *sk) =
@@@ -170,6 -170,8 +170,8 @@@ static int (*bpf_skb_vlan_push)(void *c
        (void *) BPF_FUNC_skb_vlan_push;
  static int (*bpf_skb_vlan_pop)(void *ctx) =
        (void *) BPF_FUNC_skb_vlan_pop;
+ static int (*bpf_rc_pointer_rel)(void *ctx, int rel_x, int rel_y) =
+       (void *) BPF_FUNC_rc_pointer_rel;
  
  /* llvm builtin functions that eBPF C program may use to
   * emit BPF_LD_ABS and BPF_LD_IND instructions
index bded6c2a97d3f48f262f3d2e7cf85235807b25be,d4c63316c862b0f7e11c25d90c6b0406c155e356..f570e0a39959ec3b87e8bae29366d72e1af5da97
@@@ -6,6 -6,7 +6,7 @@@
  #include <linux/err.h>
  #include <linux/kernel.h>
  #include <linux/filter.h>
+ #include <linux/unistd.h>
  #include <bpf/bpf.h>
  #include <sys/resource.h>
  #include <libelf.h>
@@@ -107,19 -108,20 +108,20 @@@ static int __base_pr(const char *format
  #define BTF_END_RAW 0xdeadbeef
  #define NAME_TBD 0xdeadb33f
  
- #define MAX_NR_RAW_TYPES 1024
+ #define MAX_NR_RAW_U32 1024
  #define BTF_LOG_BUF_SIZE 65535
  
  static struct args {
        unsigned int raw_test_num;
        unsigned int file_test_num;
        unsigned int get_info_test_num;
+       unsigned int info_raw_test_num;
        bool raw_test;
        bool file_test;
        bool get_info_test;
        bool pprint_test;
        bool always_log;
-       bool func_type_test;
+       bool info_raw_test;
  } args;
  
  static char btf_log_buf[BTF_LOG_BUF_SIZE];
@@@ -135,7 -137,7 +137,7 @@@ struct btf_raw_test 
        const char *str_sec;
        const char *map_name;
        const char *err_str;
-       __u32 raw_types[MAX_NR_RAW_TYPES];
+       __u32 raw_types[MAX_NR_RAW_U32];
        __u32 str_sec_size;
        enum bpf_map_type map_type;
        __u32 key_size;
        int str_len_delta;
  };
  
+ #define BTF_STR_SEC(str) \
+       .str_sec = str, .str_sec_size = sizeof(str)
  static struct btf_raw_test raw_tests[] = {
  /* enum E {
   *     E0,
                /* const void* */       /* [3] */
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
                /* typedef const void * const_void_ptr */
 -              BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
 -              /* struct A { */        /* [4] */
 +              BTF_TYPEDEF_ENC(NAME_TBD, 3),   /* [4] */
 +              /* struct A { */        /* [5] */
                BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), sizeof(void *)),
                /* const_void_ptr m; */
 -              BTF_MEMBER_ENC(NAME_TBD, 3, 0),
 +              BTF_MEMBER_ENC(NAME_TBD, 4, 0),
                /* } */
                BTF_END_RAW,
        },
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 0),
                /* const void* */       /* [3] */
                BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2),
 -              /* typedef const void * const_void_ptr */       /* [4] */
 -              BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 3),
 -              /* const_void_ptr[4] */ /* [5] */
 -              BTF_TYPE_ARRAY_ENC(3, 1, 4),
 +              /* typedef const void * const_void_ptr */
 +              BTF_TYPEDEF_ENC(NAME_TBD, 3),   /* [4] */
 +              /* const_void_ptr[4] */
 +              BTF_TYPE_ARRAY_ENC(4, 1, 4),    /* [5] */
                BTF_END_RAW,
        },
        .str_sec = "\0const_void_ptr",
        .err_str = "type != 0",
  },
  
 +{
 +      .descr = "typedef (invalid name, name_off = 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
 +              BTF_TYPEDEF_ENC(0, 1),                          /* [2] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__int",
 +      .str_sec_size = sizeof("\0__int"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "typedef_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "typedef (invalid name, invalid identifier)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),  /* [1] */
 +              BTF_TYPEDEF_ENC(NAME_TBD, 1),                   /* [2] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__!int",
 +      .str_sec_size = sizeof("\0__!int"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "typedef_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "ptr type (invalid name, name_off <> 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1),      /* [2] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__int",
 +      .str_sec_size = sizeof("\0__int"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "ptr_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "volatile type (invalid name, name_off <> 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), 1), /* [2] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__int",
 +      .str_sec_size = sizeof("\0__int"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "volatile_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "const type (invalid name, name_off <> 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), 1),    /* [2] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__int",
 +      .str_sec_size = sizeof("\0__int"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "const_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "restrict type (invalid name, name_off <> 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 1),   /* [2] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), 2), /* [3] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__int",
 +      .str_sec_size = sizeof("\0__int"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "restrict_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "fwd type (invalid name, name_off = 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0),   /* [2] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__skb",
 +      .str_sec_size = sizeof("\0__skb"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "fwd_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "fwd type (invalid name, invalid identifier)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_FWD, 0, 0), 0),      /* [2] */
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__!skb",
 +      .str_sec_size = sizeof("\0__!skb"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "fwd_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "array type (invalid name, name_off <> 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0),    /* [2] */
 +              BTF_ARRAY_ENC(1, 1, 4),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0__skb",
 +      .str_sec_size = sizeof("\0__skb"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "array_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "struct type (name_off = 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(0,
 +                           BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
 +              BTF_MEMBER_ENC(NAME_TBD, 1, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0A",
 +      .str_sec_size = sizeof("\0A"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "struct_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +},
 +
 +{
 +      .descr = "struct type (invalid name, invalid identifier)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
 +              BTF_MEMBER_ENC(NAME_TBD, 1, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0A!\0B",
 +      .str_sec_size = sizeof("\0A!\0B"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "struct_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "struct member (name_off = 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(0,
 +                           BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
 +              BTF_MEMBER_ENC(NAME_TBD, 1, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0A",
 +      .str_sec_size = sizeof("\0A"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "struct_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +},
 +
 +{
 +      .descr = "struct member (invalid name, invalid identifier)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 4),   /* [2] */
 +              BTF_MEMBER_ENC(NAME_TBD, 1, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0A\0B*",
 +      .str_sec_size = sizeof("\0A\0B*"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "struct_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "enum type (name_off = 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(0,
 +                           BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
 +                           sizeof(int)),                              /* [2] */
 +              BTF_ENUM_ENC(NAME_TBD, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0A\0B",
 +      .str_sec_size = sizeof("\0A\0B"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "enum_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +},
 +
 +{
 +      .descr = "enum type (invalid name, invalid identifier)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(NAME_TBD,
 +                           BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
 +                           sizeof(int)),                              /* [2] */
 +              BTF_ENUM_ENC(NAME_TBD, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0A!\0B",
 +      .str_sec_size = sizeof("\0A!\0B"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "enum_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "enum member (invalid name, name_off = 0)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(0,
 +                           BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
 +                           sizeof(int)),                              /* [2] */
 +              BTF_ENUM_ENC(0, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "",
 +      .str_sec_size = sizeof(""),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "enum_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
 +
 +{
 +      .descr = "enum member (invalid name, invalid identifier)",
 +      .raw_types = {
 +              BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),          /* [1] */
 +              BTF_TYPE_ENC(0,
 +                           BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1),
 +                           sizeof(int)),                              /* [2] */
 +              BTF_ENUM_ENC(NAME_TBD, 0),
 +              BTF_END_RAW,
 +      },
 +      .str_sec = "\0A!",
 +      .str_sec_size = sizeof("\0A!"),
 +      .map_type = BPF_MAP_TYPE_ARRAY,
 +      .map_name = "enum_type_check_btf",
 +      .key_size = sizeof(int),
 +      .value_size = sizeof(int),
 +      .key_type_id = 1,
 +      .value_type_id = 1,
 +      .max_entries = 4,
 +      .btf_load_err = true,
 +      .err_str = "Invalid name",
 +},
  {
        .descr = "arraymap invalid btf key (a bit field)",
        .raw_types = {
@@@ -2217,11 -1861,11 +2222,11 @@@ static const char *get_next_str(const c
        return start < end - 1 ? start + 1 : NULL;
  }
  
- static int get_type_sec_size(const __u32 *raw_types)
+ static int get_raw_sec_size(const __u32 *raw_types)
  {
        int i;
  
-       for (i = MAX_NR_RAW_TYPES - 1;
+       for (i = MAX_NR_RAW_U32 - 1;
             i >= 0 && raw_types[i] != BTF_END_RAW;
             i--)
                ;
@@@ -2233,7 -1877,8 +2238,8 @@@ static void *btf_raw_create(const struc
                            const __u32 *raw_types,
                            const char *str,
                            unsigned int str_sec_size,
-                           unsigned int *btf_size)
+                           unsigned int *btf_size,
+                           const char **ret_next_str)
  {
        const char *next_str = str, *end_str = str + str_sec_size;
        unsigned int size_needed, offset;
        uint32_t *ret_types;
        void *raw_btf;
  
-       type_sec_size = get_type_sec_size(raw_types);
+       type_sec_size = get_raw_sec_size(raw_types);
        if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types"))
                return NULL;
  
        ret_hdr->str_len = str_sec_size;
  
        *btf_size = size_needed;
+       if (ret_next_str)
+               *ret_next_str = next_str;
  
        return raw_btf;
  }
@@@ -2300,7 -1947,7 +2308,7 @@@ static int do_test_raw(unsigned int tes
                                 test->raw_types,
                                 test->str_sec,
                                 test->str_sec_size,
-                                &raw_btf_size);
+                                &raw_btf_size, NULL);
  
        if (!raw_btf)
                return -1;
@@@ -2377,7 -2024,7 +2385,7 @@@ static int test_raw(void
  struct btf_get_info_test {
        const char *descr;
        const char *str_sec;
-       __u32 raw_types[MAX_NR_RAW_TYPES];
+       __u32 raw_types[MAX_NR_RAW_U32];
        __u32 str_sec_size;
        int btf_size_delta;
        int (*special_test)(unsigned int test_num);
@@@ -2457,7 -2104,7 +2465,7 @@@ static int test_big_btf_info(unsigned i
                                 test->raw_types,
                                 test->str_sec,
                                 test->str_sec_size,
-                                &raw_btf_size);
+                                &raw_btf_size, NULL);
  
        if (!raw_btf)
                return -1;
@@@ -2541,7 -2188,7 +2549,7 @@@ static int test_btf_id(unsigned int tes
                                 test->raw_types,
                                 test->str_sec,
                                 test->str_sec_size,
-                                &raw_btf_size);
+                                &raw_btf_size, NULL);
  
        if (!raw_btf)
                return -1;
@@@ -2679,7 -2326,7 +2687,7 @@@ static int do_test_get_info(unsigned in
                                 test->raw_types,
                                 test->str_sec,
                                 test->str_sec_size,
-                                &raw_btf_size);
+                                &raw_btf_size, NULL);
  
        if (!raw_btf)
                return -1;
@@@ -2901,9 -2548,9 +2909,9 @@@ static int do_test_file(unsigned int te
                err = -1;
                goto done;
        }
-       if (CHECK(info.func_info_cnt != 3,
-                 "incorrect info.func_info_cnt (1st) %d",
-                 info.func_info_cnt)) {
+       if (CHECK(info.nr_func_info != 3,
+                 "incorrect info.nr_func_info (1st) %d",
+                 info.nr_func_info)) {
                err = -1;
                goto done;
        }
                goto done;
        }
  
-       func_info = malloc(info.func_info_cnt * rec_size);
+       func_info = malloc(info.nr_func_info * rec_size);
        if (CHECK(!func_info, "out of memory")) {
                err = -1;
                goto done;
  
        /* reset info to only retrieve func_info related data */
        memset(&info, 0, sizeof(info));
-       info.func_info_cnt = 3;
+       info.nr_func_info = 3;
        info.func_info_rec_size = rec_size;
        info.func_info = ptr_to_u64(func_info);
  
                err = -1;
                goto done;
        }
-       if (CHECK(info.func_info_cnt != 3,
-                 "incorrect info.func_info_cnt (2nd) %d",
-                 info.func_info_cnt)) {
+       if (CHECK(info.nr_func_info != 3,
+                 "incorrect info.nr_func_info (2nd) %d",
+                 info.nr_func_info)) {
                err = -1;
                goto done;
        }
@@@ -3208,7 -2855,7 +3216,7 @@@ static int do_test_pprint(void
        fprintf(stderr, "%s......", test->descr);
        raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
                                 test->str_sec, test->str_sec_size,
-                                &raw_btf_size);
+                                &raw_btf_size, NULL);
  
        if (!raw_btf)
                return -1;
@@@ -3412,18 -3059,25 +3420,25 @@@ static int test_pprint(void
        return err;
  }
  
- static struct btf_func_type_test {
+ #define BPF_LINE_INFO_ENC(insn_off, file_off, line_off, line_num, line_col) \
+       (insn_off), (file_off), (line_off), ((line_num) << 10 | ((line_col) & 0x3ff))
+ 
+ static struct prog_info_raw_test {
        const char *descr;
        const char *str_sec;
-       __u32 raw_types[MAX_NR_RAW_TYPES];
+       const char *err_str;
+       __u32 raw_types[MAX_NR_RAW_U32];
        __u32 str_sec_size;
        struct bpf_insn insns[MAX_INSNS];
        __u32 prog_type;
        __u32 func_info[MAX_SUBPROGS][2];
        __u32 func_info_rec_size;
        __u32 func_info_cnt;
+       __u32 line_info[MAX_NR_RAW_U32];
+       __u32 line_info_rec_size;
+       __u32 nr_jited_ksyms;
        bool expected_prog_load_failure;
- } func_type_test[] = {
+ } info_raw_tests[] = {
  {
        .descr = "func_type (main func + one sub)",
        .raw_types = {
        .func_info = { {0, 5}, {3, 6} },
        .func_info_rec_size = 8,
        .func_info_cnt = 2,
+       .line_info = { BTF_END_RAW },
  },
  
  {
        .func_info = { {0, 5}, {3, 6} },
        .func_info_rec_size = 4,
        .func_info_cnt = 2,
+       .line_info = { BTF_END_RAW },
        .expected_prog_load_failure = true,
  },
  
        .func_info = { {0, 5}, {3, 6} },
        .func_info_rec_size = 8,
        .func_info_cnt = 1,
+       .line_info = { BTF_END_RAW },
        .expected_prog_load_failure = true,
  },
  
  {
-       .descr = "func_type (Incorrect bpf_func_info.insn_offset)",
+       .descr = "func_type (Incorrect bpf_func_info.insn_off)",
        .raw_types = {
                BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
                BTF_TYPE_INT_ENC(NAME_TBD, 0, 0, 32, 4),        /* [2] */
        .func_info = { {0, 5}, {2, 6} },
        .func_info_rec_size = 8,
        .func_info_cnt = 2,
+       .line_info = { BTF_END_RAW },
+       .expected_prog_load_failure = true,
+ },
+ {
+       .descr = "line_info (No subprog)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_MOV64_IMM(BPF_REG_1, 2),
+               BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+               BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+               BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info),
+       .nr_jited_ksyms = 1,
+ },
+ {
+       .descr = "line_info (No subprog. insn_off >= prog->len)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_MOV64_IMM(BPF_REG_1, 2),
+               BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+               BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9),
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+               BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7),
+               BPF_LINE_INFO_ENC(4, 0, 0, 5, 6),
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info),
+       .nr_jited_ksyms = 1,
+       .err_str = "line_info[4].insn_off",
+       .expected_prog_load_failure = true,
+ },
+ {
+       .descr = "line_info (No subprog. zero tailing line_info)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_MOV64_IMM(BPF_REG_1, 2),
+               BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0,
+               BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0,
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0,
+               BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 0,
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32),
+       .nr_jited_ksyms = 1,
+ },
+ {
+       .descr = "line_info (No subprog. nonzero tailing line_info)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1;\0int b=2;\0return a + b;\0return a + b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_0, 1),
+               BPF_MOV64_IMM(BPF_REG_1, 2),
+               BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10), 0,
+               BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 2, 9), 0,
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8), 0,
+               BPF_LINE_INFO_ENC(3, 0, NAME_TBD, 4, 7), 1,
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info) + sizeof(__u32),
+       .nr_jited_ksyms = 1,
+       .err_str = "nonzero tailing record in line_info",
+       .expected_prog_load_failure = true,
+ },
+ {
+       .descr = "line_info (subprog)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_2, 1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+               BPF_CALL_REL(1),
+               BPF_EXIT_INSN(),
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+               BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
+               BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info),
+       .nr_jited_ksyms = 2,
+ },
+ {
+       .descr = "line_info (subprog + func_info)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_FUNC_PROTO_ENC(1, 1),                       /* [2] */
+                       BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
+               BTF_FUNC_ENC(NAME_TBD, 2),                      /* [3] */
+               BTF_FUNC_ENC(NAME_TBD, 2),                      /* [4] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0x\0sub\0main\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_2, 1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+               BPF_CALL_REL(1),
+               BPF_EXIT_INSN(),
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 2,
+       .func_info_rec_size = 8,
+       .func_info = { {0, 4}, {5, 3} },
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+               BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
+               BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info),
+       .nr_jited_ksyms = 2,
+ },
+ {
+       .descr = "line_info (subprog. missing 1st func line info)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_2, 1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+               BPF_CALL_REL(1),
+               BPF_EXIT_INSN(),
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(1, 0, NAME_TBD, 1, 10),
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+               BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 3, 8),
+               BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info),
+       .nr_jited_ksyms = 2,
+       .err_str = "missing bpf_line_info for func#0",
+       .expected_prog_load_failure = true,
+ },
+ {
+       .descr = "line_info (subprog. missing 2nd func line info)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_2, 1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+               BPF_CALL_REL(1),
+               BPF_EXIT_INSN(),
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 2, 9),
+               BPF_LINE_INFO_ENC(6, 0, NAME_TBD, 3, 8),
+               BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info),
+       .nr_jited_ksyms = 2,
+       .err_str = "missing bpf_line_info for func#1",
+       .expected_prog_load_failure = true,
+ },
+ {
+       .descr = "line_info (subprog. unordered insn offset)",
+       .raw_types = {
+               BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 4),   /* [1] */
+               BTF_END_RAW,
+       },
+       BTF_STR_SEC("\0int\0int a=1+1;\0return func(a);\0b+=1;\0return b;"),
+       .insns = {
+               BPF_MOV64_IMM(BPF_REG_2, 1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
+               BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
+               BPF_CALL_REL(1),
+               BPF_EXIT_INSN(),
+               BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
+               BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 1),
+               BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+       .func_info_cnt = 0,
+       .line_info = {
+               BPF_LINE_INFO_ENC(0, 0, NAME_TBD, 1, 10),
+               BPF_LINE_INFO_ENC(5, 0, NAME_TBD, 2, 9),
+               BPF_LINE_INFO_ENC(2, 0, NAME_TBD, 3, 8),
+               BPF_LINE_INFO_ENC(7, 0, NAME_TBD, 4, 7),
+               BTF_END_RAW,
+       },
+       .line_info_rec_size = sizeof(struct bpf_line_info),
+       .nr_jited_ksyms = 2,
+       .err_str = "Invalid line_info[2].insn_off",
        .expected_prog_load_failure = true,
  },
  
@@@ -3559,90 -3488,84 +3849,84 @@@ static size_t probe_prog_length(const s
        return len + 1;
  }
  
- static int do_test_func_type(int test_num)
+ static __u32 *patch_name_tbd(const __u32 *raw_u32,
+                            const char *str, __u32 str_off,
+                            unsigned int str_sec_size,
+                            unsigned int *ret_size)
  {
-       const struct btf_func_type_test *test = &func_type_test[test_num];
-       unsigned int raw_btf_size, info_len, rec_size;
-       int i, btf_fd = -1, prog_fd = -1, err = 0;
-       struct bpf_load_program_attr attr = {};
-       void *raw_btf, *func_info = NULL;
-       struct bpf_prog_info info = {};
-       struct bpf_func_info *finfo;
+       int i, raw_u32_size = get_raw_sec_size(raw_u32);
+       const char *end_str = str + str_sec_size;
+       const char *next_str = str + str_off;
+       __u32 *new_u32 = NULL;
  
-       fprintf(stderr, "%s......", test->descr);
-       raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
-                                test->str_sec, test->str_sec_size,
-                                &raw_btf_size);
+       if (raw_u32_size == -1)
+               return ERR_PTR(-EINVAL);
  
-       if (!raw_btf)
-               return -1;
+       if (!raw_u32_size) {
+               *ret_size = 0;
+               return NULL;
+       }
  
-       *btf_log_buf = '\0';
-       btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
-                             btf_log_buf, BTF_LOG_BUF_SIZE,
-                             args.always_log);
-       free(raw_btf);
+       new_u32 = malloc(raw_u32_size);
+       if (!new_u32)
+               return ERR_PTR(-ENOMEM);
  
-       if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {
-               err = -1;
-               goto done;
+       for (i = 0; i < raw_u32_size / sizeof(raw_u32[0]); i++) {
+               if (raw_u32[i] == NAME_TBD) {
+                       next_str = get_next_str(next_str, end_str);
+                       if (CHECK(!next_str, "Error in getting next_str\n")) {
+                               free(new_u32);
+                               return ERR_PTR(-EINVAL);
+                       }
+                       new_u32[i] = next_str - str;
+                       next_str += strlen(next_str);
+               } else {
+                       new_u32[i] = raw_u32[i];
+               }
        }
  
-       if (*btf_log_buf && args.always_log)
-               fprintf(stderr, "\n%s", btf_log_buf);
-       attr.prog_type = test->prog_type;
-       attr.insns = test->insns;
-       attr.insns_cnt = probe_prog_length(attr.insns);
-       attr.license = "GPL";
-       attr.prog_btf_fd = btf_fd;
-       attr.func_info_rec_size = test->func_info_rec_size;
-       attr.func_info_cnt = test->func_info_cnt;
-       attr.func_info = test->func_info;
+       *ret_size = raw_u32_size;
+       return new_u32;
+ }
  
-       *btf_log_buf = '\0';
-       prog_fd = bpf_load_program_xattr(&attr, btf_log_buf,
-                                        BTF_LOG_BUF_SIZE);
-       if (test->expected_prog_load_failure && prog_fd == -1) {
-               err = 0;
-               goto done;
-       }
-       if (CHECK(prog_fd == -1, "invalid prog_id errno:%d", errno)) {
-               fprintf(stderr, "%s\n", btf_log_buf);
-               err = -1;
-               goto done;
-       }
+ static int test_get_finfo(const struct prog_info_raw_test *test,
+                         int prog_fd)
+ {
+       struct bpf_prog_info info = {};
+       struct bpf_func_info *finfo;
+       __u32 info_len, rec_size, i;
+       void *func_info = NULL;
+       int err;
  
        /* get necessary lens */
        info_len = sizeof(struct bpf_prog_info);
        err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
        if (CHECK(err == -1, "invalid get info (1st) errno:%d", errno)) {
                fprintf(stderr, "%s\n", btf_log_buf);
-               err = -1;
-               goto done;
+               return -1;
        }
-       if (CHECK(info.func_info_cnt != 2,
-                 "incorrect info.func_info_cnt (1st) %d\n",
-                 info.func_info_cnt)) {
-               err = -1;
-               goto done;
+       if (CHECK(info.nr_func_info != test->func_info_cnt,
+                 "incorrect info.nr_func_info (1st) %d",
+                 info.nr_func_info)) {
+               return -1;
        }
        rec_size = info.func_info_rec_size;
-       if (CHECK(rec_size < 4,
-                 "incorrect info.func_info_rec_size (1st) %d\n", rec_size)) {
-               err = -1;
-               goto done;
+       if (CHECK(rec_size < 8,
+                 "incorrect info.func_info_rec_size (1st) %d", rec_size)) {
+               return -1;
        }
  
-       func_info = malloc(info.func_info_cnt * rec_size);
-       if (CHECK(!func_info, "out of memory")) {
-               err = -1;
-               goto done;
-       }
+       if (!info.nr_func_info)
+               return 0;
+       func_info = malloc(info.nr_func_info * rec_size);
+       if (CHECK(!func_info, "out of memory"))
+               return -1;
  
        /* reset info to only retrieve func_info related data */
        memset(&info, 0, sizeof(info));
-       info.func_info_cnt = 2;
+       info.nr_func_info = test->func_info_cnt;
        info.func_info_rec_size = rec_size;
        info.func_info = ptr_to_u64(func_info);
        err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
        if (CHECK(err == -1, "invalid get info (2nd) errno:%d", errno)) {
                fprintf(stderr, "%s\n", btf_log_buf);
                err = -1;
                goto done;
        }
-       if (CHECK(info.func_info_cnt != 2,
-                 "incorrect info.func_info_cnt (2nd) %d\n",
-                 info.func_info_cnt)) {
+       if (CHECK(info.nr_func_info != test->func_info_cnt,
+                 "incorrect info.nr_func_info (2nd) %d",
+                 info.nr_func_info)) {
                err = -1;
                goto done;
        }
-       if (CHECK(info.func_info_rec_size != rec_size,
-                 "incorrect info.func_info_rec_size (2nd) %d\n",
+       if (CHECK(info.func_info_rec_size < 8,
+                 "incorrect info.func_info_rec_size (2nd) %d",
                  info.func_info_rec_size)) {
                err = -1;
                goto done;
        }
  
+       if (CHECK(!info.func_info,
+                 "info.func_info == 0. kernel.kptr_restrict is set?")) {
+               err = -1;
+               goto done;
+       }
        finfo = func_info;
-       for (i = 0; i < 2; i++) {
+       for (i = 0; i < test->func_info_cnt; i++) {
                if (CHECK(finfo->type_id != test->func_info[i][1],
                          "incorrect func_type %u expected %u",
                          finfo->type_id, test->func_info[i][1])) {
                        err = -1;
                        goto done;
                }
                finfo = (void *)finfo + rec_size;
        }
  
+       err = 0;
  done:
+       free(func_info);
+       return err;
+ }
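+
+ /* Cross-check line_info, jited_line_info, jited_ksyms and
+  * jited_func_lens returned by BPF_OBJ_GET_INFO_BY_FD against the
+  * line_info passed in at load time.
+  */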
+ static int test_get_linfo(const struct prog_info_raw_test *test,
+                         const void *patched_linfo,
+                         __u32 cnt, int prog_fd)
+ {
+       __u32 i, info_len, nr_jited_ksyms, nr_jited_func_lens;
+       __u64 *jited_linfo = NULL, *jited_ksyms = NULL;
+       __u32 rec_size, jited_rec_size, jited_cnt;
+       struct bpf_line_info *linfo = NULL;
+       __u32 cur_func_len, ksyms_found;
+       struct bpf_prog_info info = {};
+       __u32 *jited_func_lens = NULL;
+       __u64 cur_func_ksyms;
+       int err;
+
+       jited_cnt = cnt;
+       rec_size = sizeof(*linfo);
+       jited_rec_size = sizeof(*jited_linfo);
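+       /* One ksym/func_len is reported per jited function; default the
+        * expectation to func_info_cnt unless the test overrides it.
+        */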
+       if (test->nr_jited_ksyms)
+               nr_jited_ksyms = test->nr_jited_ksyms;
+       else
+               nr_jited_ksyms = test->func_info_cnt;
+       nr_jited_func_lens = nr_jited_ksyms;
+       info_len = sizeof(struct bpf_prog_info);
+       err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+       if (CHECK(err == -1, "err:%d errno:%d", err, errno)) {
+               err = -1;
+               goto done;
+       }
+       if (!info.jited_prog_len) {
+               /* prog is not jited */
+               jited_cnt = 0;
+               nr_jited_ksyms = 1;
+               nr_jited_func_lens = 1;
+       }
+       if (CHECK(info.nr_line_info != cnt ||
+                 info.nr_jited_line_info != jited_cnt ||
+                 info.nr_jited_ksyms != nr_jited_ksyms ||
+                 info.nr_jited_func_lens != nr_jited_func_lens ||
+                 (!info.nr_line_info && info.nr_jited_line_info),
+                 "info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) nr_jited_ksyms:%u(expected:%u) nr_jited_func_lens:%u(expected:%u)",
+                 info.nr_line_info, cnt,
+                 info.nr_jited_line_info, jited_cnt,
+                 info.nr_jited_ksyms, nr_jited_ksyms,
+                 info.nr_jited_func_lens, nr_jited_func_lens)) {
+               err = -1;
+               goto done;
+       }
+       if (CHECK(info.line_info_rec_size < 16 ||
+                 info.jited_line_info_rec_size < 8,
+                 "info: line_info_rec_size:%u(userspace expected:%u) jited_line_info_rec_size:%u(userspace expected:%u)",
+                 info.line_info_rec_size, rec_size,
+                 info.jited_line_info_rec_size, jited_rec_size)) {
+               err = -1;
+               goto done;
+       }
+       if (!cnt)
+               return 0;
+       rec_size = info.line_info_rec_size;
+       jited_rec_size = info.jited_line_info_rec_size;
+       memset(&info, 0, sizeof(info));
+       linfo = calloc(cnt, rec_size);
+       if (CHECK(!linfo, "!linfo")) {
+               err = -1;
+               goto done;
+       }
+       info.nr_line_info = cnt;
+       info.line_info_rec_size = rec_size;
+       info.line_info = ptr_to_u64(linfo);
+       if (jited_cnt) {
+               jited_linfo = calloc(jited_cnt, jited_rec_size);
+               jited_ksyms = calloc(nr_jited_ksyms, sizeof(*jited_ksyms));
+               jited_func_lens = calloc(nr_jited_func_lens,
+                                        sizeof(*jited_func_lens));
+               if (CHECK(!jited_linfo || !jited_ksyms || !jited_func_lens,
+                         "jited_linfo:%p jited_ksyms:%p jited_func_lens:%p",
+                         jited_linfo, jited_ksyms, jited_func_lens)) {
+                       err = -1;
+                       goto done;
+               }
+               info.nr_jited_line_info = jited_cnt;
+               info.jited_line_info_rec_size = jited_rec_size;
+               info.jited_line_info = ptr_to_u64(jited_linfo);
+               info.nr_jited_ksyms = nr_jited_ksyms;
+               info.jited_ksyms = ptr_to_u64(jited_ksyms);
+               info.nr_jited_func_lens = nr_jited_func_lens;
+               info.jited_func_lens = ptr_to_u64(jited_func_lens);
+       }
+       err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
+       /*
+        * Only recheck the info.*line_info* fields.
+        * Other fields are not the concern of this test.
+        */
+       if (CHECK(err == -1 ||
+                 !info.line_info ||
+                 info.nr_line_info != cnt ||
+                 (jited_cnt && !info.jited_line_info) ||
+                 info.nr_jited_line_info != jited_cnt ||
+                 info.line_info_rec_size != rec_size ||
+                 info.jited_line_info_rec_size != jited_rec_size,
+                 "err:%d errno:%d info: nr_line_info:%u(expected:%u) nr_jited_line_info:%u(expected:%u) line_info_rec_size:%u(expected:%u) jited_linfo_rec_size:%u(expected:%u) line_info:%p jited_line_info:%p",
+                 err, errno,
+                 info.nr_line_info, cnt,
+                 info.nr_jited_line_info, jited_cnt,
+                 info.line_info_rec_size, rec_size,
+                 info.jited_line_info_rec_size, jited_rec_size,
+                 (void *)(long)info.line_info,
+                 (void *)(long)info.jited_line_info)) {
+               err = -1;
+               goto done;
+       }
+       CHECK(linfo[0].insn_off, "linfo[0].insn_off:%u",
+             linfo[0].insn_off);
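+       /* insn_off must be strictly increasing; the remaining fields must
+        * match the name-patched line_info that was passed at load time.
+        */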
+       for (i = 1; i < cnt; i++) {
+               const struct bpf_line_info *expected_linfo;
+               expected_linfo = patched_linfo + (i * test->line_info_rec_size);
+               if (CHECK(linfo[i].insn_off <= linfo[i - 1].insn_off,
+                         "linfo[%u].insn_off:%u <= linfo[%u].insn_off:%u",
+                         i, linfo[i].insn_off,
+                         i - 1, linfo[i - 1].insn_off)) {
+                       err = -1;
+                       goto done;
+               }
+               if (CHECK(linfo[i].file_name_off != expected_linfo->file_name_off ||
+                         linfo[i].line_off != expected_linfo->line_off ||
+                         linfo[i].line_col != expected_linfo->line_col,
+                         "linfo[%u] (%u, %u, %u) != (%u, %u, %u)", i,
+                         linfo[i].file_name_off,
+                         linfo[i].line_off,
+                         linfo[i].line_col,
+                         expected_linfo->file_name_off,
+                         expected_linfo->line_off,
+                         expected_linfo->line_col)) {
+                       err = -1;
+                       goto done;
+               }
+       }
+       if (!jited_cnt) {
+               fprintf(stderr, "not jited. skipping jited_line_info check. ");
+               err = 0;
+               goto done;
+       }
+       if (CHECK(jited_linfo[0] != jited_ksyms[0],
+                 "jited_linfo[0]:%lx != jited_ksyms[0]:%lx",
+                 (long)(jited_linfo[0]), (long)(jited_ksyms[0]))) {
+               err = -1;
+               goto done;
+       }
+       ksyms_found = 1;
+       cur_func_len = jited_func_lens[0];
+       cur_func_ksyms = jited_ksyms[0];
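+       /* Walk the jited line info: within one function the addresses must
+        * be strictly increasing and stay within cur_func_len of that
+        * function's ksym; matching the next ksym means the next subprog
+        * has started.
+        */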
+       for (i = 1; i < jited_cnt; i++) {
+               if (ksyms_found < nr_jited_ksyms &&
+                   jited_linfo[i] == jited_ksyms[ksyms_found]) {
+                       cur_func_ksyms = jited_ksyms[ksyms_found];
+                       cur_func_len = jited_func_lens[ksyms_found];
+                       ksyms_found++;
+                       continue;
+               }
+               if (CHECK(jited_linfo[i] <= jited_linfo[i - 1],
+                         "jited_linfo[%u]:%lx <= jited_linfo[%u]:%lx",
+                         i, (long)jited_linfo[i],
+                         i - 1, (long)(jited_linfo[i - 1]))) {
+                       err = -1;
+                       goto done;
+               }
+               if (CHECK(jited_linfo[i] - cur_func_ksyms > cur_func_len,
+                         "jited_linfo[%u]:%lx - %lx > %u",
+                         i, (long)jited_linfo[i], (long)cur_func_ksyms,
+                         cur_func_len)) {
+                       err = -1;
+                       goto done;
+               }
+       }
+       if (CHECK(ksyms_found != nr_jited_ksyms,
+                 "ksyms_found:%u != nr_jited_ksyms:%u",
+                 ksyms_found, nr_jited_ksyms)) {
+               err = -1;
+               goto done;
+       }
+       err = 0;
+ done:
+       free(linfo);
+       free(jited_linfo);
+       free(jited_ksyms);
+       free(jited_func_lens);
+       return err;
+ }
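+
+ /* Load raw BTF plus func_info/line_info through BPF_PROG_LOAD, then
+  * verify what the kernel reports back via test_get_finfo() and
+  * test_get_linfo().
+  */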
+ static int do_test_info_raw(unsigned int test_num)
+ {
+       const struct prog_info_raw_test *test = &info_raw_tests[test_num - 1];
+       unsigned int raw_btf_size, linfo_str_off, linfo_size;
+       int btf_fd = -1, prog_fd = -1, err = 0;
+       void *raw_btf, *patched_linfo = NULL;
+       const char *ret_next_str;
+       union bpf_attr attr = {};
+
+       fprintf(stderr, "BTF prog info raw test[%u] (%s): ", test_num, test->descr);
+       raw_btf = btf_raw_create(&hdr_tmpl, test->raw_types,
+                                test->str_sec, test->str_sec_size,
+                                &raw_btf_size, &ret_next_str);
+       if (!raw_btf)
+               return -1;
+       *btf_log_buf = '\0';
+       btf_fd = bpf_load_btf(raw_btf, raw_btf_size,
+                             btf_log_buf, BTF_LOG_BUF_SIZE,
+                             args.always_log);
+       free(raw_btf);
+       if (CHECK(btf_fd == -1, "invalid btf_fd errno:%d", errno)) {
+               err = -1;
+               goto done;
+       }
+       if (*btf_log_buf && args.always_log)
+               fprintf(stderr, "\n%s", btf_log_buf);
+       *btf_log_buf = '\0';
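+       /* btf_raw_create() leaves ret_next_str pointing at the first string
+        * not consumed by the BTF types, i.e. where the line_info strings
+        * begin.
+        */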
+       linfo_str_off = ret_next_str - test->str_sec;
+       patched_linfo = patch_name_tbd(test->line_info,
+                                      test->str_sec, linfo_str_off,
+                                      test->str_sec_size, &linfo_size);
+       if (IS_ERR(patched_linfo)) {
+               fprintf(stderr, "error in creating raw bpf_line_info");
+               err = -1;
+               goto done;
+       }
+       attr.prog_type = test->prog_type;
+       attr.insns = ptr_to_u64(test->insns);
+       attr.insn_cnt = probe_prog_length(test->insns);
+       attr.license = ptr_to_u64("GPL");
+       attr.prog_btf_fd = btf_fd;
+       attr.func_info_rec_size = test->func_info_rec_size;
+       attr.func_info_cnt = test->func_info_cnt;
+       attr.func_info = ptr_to_u64(test->func_info);
+       attr.log_buf = ptr_to_u64(btf_log_buf);
+       attr.log_size = BTF_LOG_BUF_SIZE;
+       attr.log_level = 1;
+       if (linfo_size) {
+               attr.line_info_rec_size = test->line_info_rec_size;
+               attr.line_info = ptr_to_u64(patched_linfo);
+               attr.line_info_cnt = linfo_size / attr.line_info_rec_size;
+       }
+       prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
+       err = ((prog_fd == -1) != test->expected_prog_load_failure);
+       if (CHECK(err, "prog_fd:%d expected_prog_load_failure:%u errno:%d",
+                 prog_fd, test->expected_prog_load_failure, errno) ||
+           CHECK(test->err_str && !strstr(btf_log_buf, test->err_str),
+                 "expected err_str:%s", test->err_str)) {
+               err = -1;
+               goto done;
+       }
+       if (prog_fd == -1)
+               goto done;
+       err = test_get_finfo(test, prog_fd);
+       if (err)
+               goto done;
+       err = test_get_linfo(test, patched_linfo, attr.line_info_cnt, prog_fd);
+       if (err)
+               goto done;
+ done:
+       if (!err)
+               fprintf(stderr, "OK");
        if (*btf_log_buf && (err || args.always_log))
                fprintf(stderr, "\n%s", btf_log_buf);
  
        if (btf_fd != -1)
                close(btf_fd);
        if (prog_fd != -1)
                close(prog_fd);
-       free(func_info);
+       if (!IS_ERR(patched_linfo))
+               free(patched_linfo);
        return err;
  }
  
- static int test_func_type(void)
+ static int test_info_raw(void)
  {
        unsigned int i;
        int err = 0;
  
-       for (i = 0; i < ARRAY_SIZE(func_type_test); i++)
-               err |= count_result(do_test_func_type(i));
+       if (args.info_raw_test_num)
+               return count_result(do_test_info_raw(args.info_raw_test_num));
+       for (i = 1; i <= ARRAY_SIZE(info_raw_tests); i++)
+               err |= count_result(do_test_info_raw(i));
  
        return err;
  }
  
  static void usage(const char *cmd)
  {
-       fprintf(stderr, "Usage: %s [-l] [[-r test_num (1 - %zu)] |"
-                       " [-g test_num (1 - %zu)] |"
-                       " [-f test_num (1 - %zu)] | [-p] | [-k] ]\n",
+       fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
+                       "\t[-g btf_get_info_test_num (1 - %zu)] |\n"
+                       "\t[-f btf_file_test_num (1 - %zu)] |\n"
+                       "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
+                       "\t[-p (pretty print test)]]\n",
                cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
-               ARRAY_SIZE(file_tests));
+               ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests));
  }
  
  static int parse_args(int argc, char **argv)
  {
-       const char *optstr = "lpkf:r:g:";
+       const char *optstr = "lpk:f:r:g:";
        int opt;
  
        while ((opt = getopt(argc, argv, optstr)) != -1) {
                        args.pprint_test = true;
                        break;
                case 'k':
-                       args.func_type_test = true;
+                       args.info_raw_test_num = atoi(optarg);
+                       args.info_raw_test = true;
                        break;
                case 'h':
                        usage(argv[0]);
                return -1;
        }
  
+       if (args.info_raw_test_num &&
+           (args.info_raw_test_num < 1 ||
+            args.info_raw_test_num > ARRAY_SIZE(info_raw_tests))) {
+               fprintf(stderr, "BTF prog info raw test number must be [1 - %zu]\n",
+                       ARRAY_SIZE(info_raw_tests));
+               return -1;
+       }
        return 0;
  }
  
@@@ -3800,16 -4046,17 +4407,17 @@@ int main(int argc, char **argv
        if (args.pprint_test)
                err |= test_pprint();
  
-       if (args.func_type_test)
-               err |= test_func_type();
+       if (args.info_raw_test)
+               err |= test_info_raw();
  
        if (args.raw_test || args.get_info_test || args.file_test ||
-           args.pprint_test || args.func_type_test)
+           args.pprint_test || args.info_raw_test)
                goto done;
  
        err |= test_raw();
        err |= test_get_info();
        err |= test_file();
+       err |= test_info_raw();
  
  done:
        print_summary();
index 1b7760d174d70de611d13747b7f89f60b2f2cd1f,957e4711c46cece47c28f419f174eb6e10236bc8..a08c67c8767ee7447745cccb2c476236e9cfbf58
@@@ -721,8 -721,18 +721,18 @@@ static struct bpf_test tests[] = 
                        BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
                        BPF_EXIT_INSN(),
                },
-               .result = REJECT,
-               .errstr = "unknown opcode c4",
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "arsh32 on imm 2",
+               .insns = {
+                       BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
+                       BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = -16069393,
        },
        {
                "arsh32 on reg",
                        BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
-               .result = REJECT,
-               .errstr = "unknown opcode cc",
+               .result = ACCEPT,
+               .retval = 0,
+       },
+       {
+               "arsh32 on reg 2",
+               .insns = {
+                       BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
+                       BPF_MOV64_IMM(BPF_REG_1, 15),
+                       BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 43724,
        },
        {
                "arsh64 on imm",
                .errstr = "invalid bpf_context access",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SK_MSG,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "direct packet read for SK_MSG",
                },
                .errstr = "invalid bpf_context access",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "check cb access: half, wrong type",
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
+       {
+               "alu32: mov u32 const",
+               .insns = {
+                       BPF_MOV32_IMM(BPF_REG_7, 0),
+                       BPF_ALU32_IMM(BPF_AND, BPF_REG_7, 1),
+                       BPF_MOV32_REG(BPF_REG_0, BPF_REG_7),
+                       BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+                       BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
+                       BPF_EXIT_INSN(),
+               },
+               .result = ACCEPT,
+               .retval = 0,
+       },
        {
                "unpriv: partial copy of pointer",
                .insns = {
                .result = REJECT,
                .errstr = "R0 invalid mem access 'inv'",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "raw_stack: skb_load_bytes, spilled regs corruption 2",
                .result = REJECT,
                .errstr = "R3 invalid mem access 'inv'",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "raw_stack: skb_load_bytes, spilled regs + data",
                .errstr = "R2 invalid mem access 'inv'",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "direct packet access: test16 (arith on data_end)",
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "direct packet access: test21 (x += pkt_ptr, 2)",
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "direct packet access: test22 (x += pkt_ptr, 3)",
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "direct packet access: test23 (x += pkt_ptr, 4)",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = REJECT,
                .errstr = "invalid access to packet, off=0 size=8, R5(id=1,off=0,r=0)",
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "direct packet access: test24 (x += pkt_ptr, 5)",
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "direct packet access: test25 (marking on <, good access)",
                .result = REJECT,
                .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
                .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid cgroup storage access 5",
                .result = REJECT,
                .errstr = "invalid access to map value, value_size=64 off=-2 size=4",
                .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "invalid per-cpu cgroup storage access 5",
                .errstr = "invalid mem access 'inv'",
                .result = REJECT,
                .result_unpriv = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "map element value illegal alu op, 5",
                .fixup_map_hash_48b = { 3 },
                .errstr = "R0 invalid mem access 'inv'",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "map element value is preserved across register spilling",
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .retval = 0 /* csum_diff of 64-byte packet */,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
                        BPF_JMP_IMM(BPF_JA, 0, 0, -7),
                },
                .fixup_map_hash_8b = { 4 },
 -              .errstr = "R0 invalid mem access 'inv'",
 +              .errstr = "unbounded min value",
                .result = REJECT,
        },
        {
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data' > pkt_end, bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_end > pkt_data', good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_end > pkt_data', bad access 2",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data' < pkt_end, good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data' < pkt_end, bad access 2",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_end < pkt_data', good access",
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_end < pkt_data', bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data' >= pkt_end, good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data' >= pkt_end, bad access 2",
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_end >= pkt_data', bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data' <= pkt_end, good access",
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data' <= pkt_end, bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_end <= pkt_data', good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_end <= pkt_data', bad access 2",
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_meta' > pkt_data, bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data > pkt_meta', good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data > pkt_meta', bad access 2",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_meta' < pkt_data, good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_meta' < pkt_data, bad access 2",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data < pkt_meta', good access",
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data < pkt_meta', bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_meta' >= pkt_data, good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_meta' >= pkt_data, bad access 2",
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data >= pkt_meta', bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_meta' <= pkt_data, good access",
                },
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_meta' <= pkt_data, bad access 1",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data <= pkt_meta', good access",
                .errstr = "R1 offset is outside of the packet",
                .result = REJECT,
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "XDP pkt read, pkt_data <= pkt_meta', bad access 2",
                "check deducing bounds from const, 5",
                .insns = {
                        BPF_MOV64_IMM(BPF_REG_0, 0),
 -                      BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
 +                      BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
                        BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
                        BPF_EXIT_INSN(),
                },
                },
                .result = REJECT,
                .errstr = "dereference of modified ctx ptr",
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "check deducing bounds from const, 8",
                },
                .result = REJECT,
                .errstr = "dereference of modified ctx ptr",
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "check deducing bounds from const, 9",
                .result = REJECT,
                .errstr = "R6 invalid mem access 'inv'",
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: two calls with args",
                .fixup_map_hash_8b = { 12, 22 },
                .result = REJECT,
                .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2",
                .fixup_map_hash_8b = { 12, 22 },
                .result = REJECT,
                .errstr = "invalid access to map value, value_size=8 off=2 size=8",
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: two calls that receive map_value_ptr_or_null via arg. test1",
                .result = ACCEPT,
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .retval = POINTER_VALUE,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 2",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .errstr = "invalid access to packet",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 3",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
                .retval = 1,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 4",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
                .retval = 1,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 5",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .errstr = "same insn cannot be used with different",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 6",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .errstr = "R4 invalid mem access",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 7",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .errstr = "R4 invalid mem access",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 8",
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: pkt_ptr spill into caller stack 9",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .errstr = "invalid access to packet",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "calls: caller stack init to zero or map_value_or_null",
                .result = REJECT,
                .errstr = "BPF_XADD stores into R2 pkt is not allowed",
                .prog_type = BPF_PROG_TYPE_XDP,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "xadd/w check whether src/dst got mangled, 1",
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .errstr = "Unreleased reference",
                .result = REJECT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "reference tracking: alloc, check, free in both subbranches",
                },
                .prog_type = BPF_PROG_TYPE_SCHED_CLS,
                .result = ACCEPT,
+               .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
        },
        {
                "reference tracking in call: free reference in subprog",
                .result_unpriv = REJECT,
                .result = ACCEPT,
        },
+       {
+               "check wire_len is not readable by sockets",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, wire_len)),
+                       BPF_EXIT_INSN(),
+               },
+               .errstr = "invalid bpf_context access",
+               .result = REJECT,
+       },
+       {
+               "check wire_len is readable by tc classifier",
+               .insns = {
+                       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+                                   offsetof(struct __sk_buff, wire_len)),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .result = ACCEPT,
+       },
+       {
+               "check wire_len is not writable by tc classifier",
+               .insns = {
+                       BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_1,
+                                   offsetof(struct __sk_buff, wire_len)),
+                       BPF_EXIT_INSN(),
+               },
+               .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+               .errstr = "invalid bpf_context access",
+               .errstr_unpriv = "R1 leaks addr",
+               .result = REJECT,
+       },
  };
  
  static int probe_filter_length(const struct bpf_insn *fp)
  static void do_test_single(struct bpf_test *test, bool unpriv,
                           int *passes, int *errors)
  {
-       int fd_prog, expected_ret, reject_from_alignment;
+       int fd_prog, expected_ret, alignment_prevented_execution;
        int prog_len, prog_type = test->prog_type;
        struct bpf_insn *prog = test->insns;
        int map_fds[MAX_NR_MAPS];
        const char *expected_err;
        uint32_t expected_val;
        uint32_t retval;
+       __u32 pflags;
        int i, err;
  
        for (i = 0; i < MAX_NR_MAPS; i++)
        do_test_fixup(test, prog_type, prog, map_fds);
        prog_len = probe_filter_length(prog);
  
-       fd_prog = bpf_verify_program(prog_type, prog, prog_len,
-                                    test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
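+       /* Translate test flags into BPF_PROG_LOAD flags: strict-alignment
+        * tests keep the verifier's alignment checks on, while tests that
+        * need efficient unaligned access load with BPF_F_ANY_ALIGNMENT so
+        * those checks are forcefully disabled.
+        */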
+       pflags = 0;
+       if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
+               pflags |= BPF_F_STRICT_ALIGNMENT;
+       if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
+               pflags |= BPF_F_ANY_ALIGNMENT;
+       fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
                                     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
  
        expected_ret = unpriv && test->result_unpriv != UNDEF ?
        expected_val = unpriv && test->retval_unpriv ?
                       test->retval_unpriv : test->retval;
  
-       reject_from_alignment = fd_prog < 0 &&
-                               (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) &&
-                               strstr(bpf_vlog, "misaligned");
- #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-       if (reject_from_alignment) {
-               printf("FAIL\nFailed due to alignment despite having efficient unaligned access: '%s'!\n",
-                      strerror(errno));
-               goto fail_log;
-       }
- #endif
+       alignment_prevented_execution = 0;
        if (expected_ret == ACCEPT) {
-               if (fd_prog < 0 && !reject_from_alignment) {
+               if (fd_prog < 0) {
                        printf("FAIL\nFailed to load prog '%s'!\n",
                               strerror(errno));
                        goto fail_log;
                }
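+               /* The prog loaded only because BPF_F_ANY_ALIGNMENT bypassed
+                * the verifier's alignment checks; on an arch without
+                * efficient unaligned access, skip executing it and note
+                * that in the OK message below.
+                */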
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+               if (fd_prog >= 0 &&
+                   (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) {
+                       alignment_prevented_execution = 1;
+                       goto test_ok;
+               }
+ #endif
        } else {
                if (fd_prog >= 0) {
                        printf("FAIL\nUnexpected success to load!\n");
                        goto fail_log;
                }
-               if (!strstr(bpf_vlog, expected_err) && !reject_from_alignment) {
+               if (!strstr(bpf_vlog, expected_err)) {
                        printf("FAIL\nUnexpected error message!\n\tEXP: %s\n\tRES: %s\n",
                              expected_err, bpf_vlog);
                        goto fail_log;
                        goto fail_log;
                }
        }
+ #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+ test_ok:
+ #endif
        (*passes)++;
-       printf("OK%s\n", reject_from_alignment ?
-              " (NOTE: reject due to unknown alignment)" : "");
+       printf("OK%s\n", alignment_prevented_execution ?
+              " (NOTE: not executed due to unknown alignment)" : "");
  close_fds:
        close(fd_prog);
        for (i = 0; i < MAX_NR_MAPS; i++)