Merge tag 'pm-4.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
author	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 5 Jul 2018 16:50:18 +0000 (09:50 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 5 Jul 2018 16:50:18 +0000 (09:50 -0700)
Pull power management fixes from Rafael Wysocki:
 "These fix a PCI power management regression introduced during the 4.17
  cycle and fix up the recently added support for devices in multiple
  power domains.

  Specifics:

   - Resume parallel PCI (non-PCIe) bridges on suspend-to-RAM (ACPI S3)
     to avoid confusing the platform firmware, which started happening
     after a core power management regression fix that went in during
     the 4.17 cycle (Rafael Wysocki).

   - Fix up the recently added support for devices in multiple power
     domains by avoiding powering up the entire domain unnecessarily
     when attaching a device to it (Ulf Hansson)"

* tag 'pm-4.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
  PM / Domains: Don't power on at attach for the multi PM domain case
  PCI / ACPI / PM: Resume bridges w/o drivers on suspend-to-RAM
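
For orientation, a minimal sketch of the condition the PCI fix revolves around: not the patch itself, just the idea. pci_is_bridge(), pci_is_pcie(), acpi_target_system_state() and ACPI_STATE_S3 are real kernel symbols; the helper name and exact shape are hypothetical:

    /* Hypothetical helper: a conventional (non-PCIe) bridge with no
     * driver bound would otherwise stay suspended across S3, which some
     * platform firmware cannot cope with, so force it to resume. */
    static bool bridge_needs_resume(struct pci_dev *pdev)
    {
            return pci_is_bridge(pdev) && !pci_is_pcie(pdev) &&
                   !pdev->driver &&
                   acpi_target_system_state() == ACPI_STATE_S3;
    }

The genpd fix is simpler in spirit: defer powering on the domain until the device actually needs it, rather than doing it at attach time.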

133 files changed:
Makefile
arch/arm/firmware/Makefile
arch/arm/net/bpf_jit_32.c
arch/m68k/include/asm/mcf_pgalloc.h
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/kernel/entry.S
arch/openrisc/kernel/head.S
arch/openrisc/kernel/traps.c
arch/riscv/Kconfig
arch/riscv/include/uapi/asm/elf.h
arch/riscv/kernel/irq.c
arch/riscv/kernel/module.c
arch/riscv/kernel/ptrace.c
arch/riscv/kernel/setup.c
arch/riscv/mm/init.c
arch/s390/net/bpf_jit_comp.c
drivers/atm/iphase.c
drivers/atm/zatm.c
drivers/infiniband/hw/mlx5/main.c
drivers/md/md.c
drivers/md/raid10.c
drivers/media/rc/bpf-lirc.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/freescale/fman/fman_port.c
drivers/net/ethernet/huawei/hinic/hinic_rx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
drivers/net/ethernet/mellanox/mlx5/core/fw.c
drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/mellanox/mlx5/core/sriov.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/netronome/nfp/bpf/main.c
drivers/net/ethernet/netronome/nfp/flower/match.c
drivers/net/ethernet/netronome/nfp/flower/offload.c
drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
drivers/net/ethernet/qlogic/qed/qed_dcbx.c
drivers/net/ethernet/qlogic/qed/qed_dev.c
drivers/net/ethernet/qlogic/qed/qed_main.c
drivers/net/ethernet/qlogic/qed/qed_sriov.c
drivers/net/ethernet/qlogic/qede/qede_ptp.c
drivers/net/ethernet/sfc/farch.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
drivers/net/ethernet/stmicro/stmmac/hwif.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/geneve.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ipvlan/ipvlan_main.c
drivers/net/phy/dp83tc811.c
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/virtio_net.c
drivers/net/vxlan.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
fs/userfaultfd.c
include/linux/bpf-cgroup.h
include/linux/bpf.h
include/linux/bpf_lirc.h
include/linux/filter.h
include/linux/mlx5/eswitch.h
include/linux/mlx5/mlx5_ifc.h
include/linux/netdevice.h
include/net/net_namespace.h
include/net/netns/ipv6.h
include/net/pkt_cls.h
include/uapi/linux/bpf.h
kernel/bpf/cgroup.c
kernel/bpf/core.c
kernel/bpf/sockmap.c
kernel/bpf/syscall.c
lib/test_bpf.c
mm/debug.c
mm/hugetlb.c
mm/kasan/kasan.c
net/8021q/vlan.c
net/Makefile
net/bpfilter/Kconfig
net/bpfilter/Makefile
net/bpfilter/bpfilter_kern.c
net/bpfilter/bpfilter_umh_blob.S [new file with mode: 0644]
net/core/dev_ioctl.c
net/core/fib_rules.c
net/core/filter.c
net/core/skbuff.c
net/core/sock.c
net/ipv4/fou.c
net/ipv4/gre_offload.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/seg6_hmac.c
net/mac80211/tx.c
net/netfilter/nf_conncount.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_log.c
net/netfilter/nfnetlink_queue.c
net/rds/connection.c
net/rds/loop.c
net/rds/loop.h
net/smc/af_smc.c
net/smc/smc.h
net/strparser/strparser.c
net/wireless/nl80211.c
samples/bpf/xdp_fwd_kern.c
scripts/cc-can-link.sh
tools/bpf/bpftool/prog.c
tools/testing/selftests/bpf/config
tools/testing/selftests/bpf/test_kmod.sh
tools/testing/selftests/bpf/test_lirc_mode2.sh
tools/testing/selftests/bpf/test_lwt_seg6local.sh
tools/testing/selftests/bpf/test_sockmap.c
tools/testing/selftests/net/fib_tests.sh [changed mode: 0644->0755]

index c5ce55cbc543c9c676580381f2a1cf727e8e7cd7..d15ac32afbaf03bac1095938a31a6295318ccaac 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
   KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
 endif
 
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
-  CC_CAN_LINK := y
-  export CC_CAN_LINK
-endif
-
 # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
 # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
 # CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
index a71f16536b6c178c09334efc3047f4ddcc8da91b..6e41336b0bc4fc71ebaf5f5f4bae7e5e9e1b0395 100644 (file)
@@ -1 +1,4 @@
 obj-$(CONFIG_TRUSTED_FOUNDATIONS)      += trusted_foundations.o
+
+# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
+KCOV_INSTRUMENT                := n
index 6e8b7161303936908b3b2b7adfced5d17de379ce..f6a62ae44a65b61e162203ad261a7fbb5d4b34cf 100644 (file)
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
                /* there are 2 passes here */
                bpf_jit_dump(prog->len, image_size, 2, ctx.target);
 
-       set_memory_ro((unsigned long)header, header->pages);
+       bpf_jit_binary_lock_ro(header);
        prog->bpf_func = (void *)ctx.target;
        prog->jited = 1;
        prog->jited_len = image_size;
index 8b707c249026032ac8bef80c6f1666c105d9b50d..12fe700632f458ea632a18bb9cdccd6660efd241 100644 (file)
@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
                                  unsigned long address)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
 
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
        return page;
 }
 
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
 {
+       pgtable_page_dtor(page);
        __free_page(page);
 }
 
index 3e1a46615120a566adbfff65b5aab8e860bf3809..8999b922651210f6c20c83e7aa72b3bccf6c3d58 100644 (file)
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
        __free_page(pte);
 }
 
+#define __pte_free_tlb(tlb, pte, addr) \
+do {                                   \
+       pgtable_page_dtor(pte);         \
+       tlb_remove_page((tlb), (pte));  \
+} while (0)
 
-#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
 #define pmd_pgtable(pmd) pmd_page(pmd)
 
 #define check_pgt_cache()          do { } while (0)
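
Both pgalloc fixes in this pull (the m68k one above and this openrisc one) restore the same invariant: a page-table page that went through pgtable_page_ctor() when allocated must go through pgtable_page_dtor() before __free_page(), or it is returned to the allocator with its split-ptlock state still initialized. For contrast, a sketch of what the allocation side of that contract conventionally looks like (pgtable_page_ctor() is the real API; the surrounding shape is illustrative):

    static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                             unsigned long address)
    {
            struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);

            if (!pte)
                    return NULL;
            /* Initializes the split page-table lock the dtor tears down. */
            if (!pgtable_page_ctor(pte)) {
                    __free_page(pte);
                    return NULL;
            }
            return pte;
    }
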
index 690d55272ba688a2adc88bca00e66cc61903c711..0c826ad6e994cce359474229acf08ff0d0330b78 100644 (file)
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
        l.addi  r3,r1,0                    // pt_regs
        /* r4 set by EXCEPTION_HANDLE */   // effective address of fault
 
-       /*
-        * __PHX__: TODO
-        *
-        * all this can be written much simpler. look at
-        * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
-        */
 #ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
        l.lwz   r6,PT_PC(r3)               // address of an offending insn
        l.lwz   r6,0(r6)                   // instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
 
 #else
 
-       l.lwz   r6,PT_SR(r3)               // SR
+       l.mfspr r6,r0,SPR_SR               // SR
        l.andi  r6,r6,SPR_SR_DSX           // check for delay slot exception
        l.sfne  r6,r0                      // exception happened in delay slot
        l.bnf   7f
index fb02b2a1d6f2d875372b125cf837feb119d0164e..9fc6b60140f007bea1442f60727a22aee24776c9 100644 (file)
  *      r4  - EEAR     exception EA
  *      r10 - current  pointing to current_thread_info struct
  *      r12 - syscall  0, since we didn't come from syscall
- *      r13 - temp     it actually contains new SR, not needed anymore
- *      r31 - handler  address of the handler we'll jump to
+ *      r30 - handler  address of the handler we'll jump to
  *
  *      handler has to save remaining registers to the exception
  *      ksp frame *before* tainting them!
        /* r1 is KSP, r30 is __pa(KSP) */                       ;\
        tophys  (r30,r1)                                        ;\
        l.sw    PT_GPR12(r30),r12                               ;\
+       /* r4 use for tmp before EA */                          ;\
        l.mfspr r12,r0,SPR_EPCR_BASE                            ;\
        l.sw    PT_PC(r30),r12                                  ;\
        l.mfspr r12,r0,SPR_ESR_BASE                             ;\
        /* r12 == 1 if we come from syscall */                  ;\
        CLEAR_GPR(r12)                                          ;\
        /* ----- turn on MMU ----- */                           ;\
-       l.ori   r30,r0,(EXCEPTION_SR)                           ;\
+       /* Carry DSX into exception SR */                       ;\
+       l.mfspr r30,r0,SPR_SR                                   ;\
+       l.andi  r30,r30,SPR_SR_DSX                              ;\
+       l.ori   r30,r30,(EXCEPTION_SR)                          ;\
        l.mtspr r0,r30,SPR_ESR_BASE                             ;\
        /* r30: EA address of handler */                        ;\
        LOAD_SYMBOL_2_GPR(r30,handler)                          ;\
index fac246e6f37a278e4cd7c001c2cd53a8df88dc4e..d8981cbb852a5f1fc1ea80667df3ed451579d13c 100644 (file)
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
                return 0;
        }
 #else
-       return regs->sr & SPR_SR_DSX;
+       return mfspr(SPR_SR) & SPR_SR_DSX;
 #endif
 }
 
index f12680c9b9475e2b130da3369644e797575f7a80..4764fdeb4f1f6837c771e42f9e84ca5bd8291af6 100644 (file)
@@ -107,6 +107,7 @@ config ARCH_RV32I
        select GENERIC_LIB_ASHLDI3
        select GENERIC_LIB_ASHRDI3
        select GENERIC_LIB_LSHRDI3
+       select GENERIC_LIB_UCMPDI2
 
 config ARCH_RV64I
        bool "RV64I"
index 5cae4c30cd8e2e2147b59285f9eccaaae7e9a789..1e0dfc36aab9e597aaf0d0fb3c99b2ac3dcae750 100644 (file)
@@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
 
 typedef union __riscv_fp_state elf_fpregset_t;
 
-#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
-#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info)                ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info)                ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info)       ELF32_R_TYPE(r_info)
+#endif
 
 /*
  * RISC-V relocation types
index b74cbfbce2d0dd9df65ba779ab9dd8feb0d38eed..7bcdaed15703be6d8f141a8582abd024dc966fec 100644 (file)
 #include <linux/irqchip.h>
 #include <linux/irqdomain.h>
 
-#ifdef CONFIG_RISCV_INTC
-#include <linux/irqchip/irq-riscv-intc.h>
-#endif
-
 void __init init_IRQ(void)
 {
        irqchip_init();
index 1d5e9b934b8ca5b5b78a64af5e1c06e334b7e5f2..3303ed2cd4193f82c51730a992d6c875b361ff80 100644 (file)
@@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
 static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
                                     Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm12 = (offset & 0x1000) << (31 - 12);
        u32 imm11 = (offset & 0x800) >> (11 - 7);
        u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
@@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
                                  Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u32 imm20 = (offset & 0x100000) << (31 - 20);
        u32 imm19_12 = (offset & 0xff000);
        u32 imm11 = (offset & 0x800) << (20 - 11);
@@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm8 = (offset & 0x100) << (12 - 8);
        u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
        u16 imm5 = (offset & 0x20) >> (5 - 2);
@@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
 static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        u16 imm11 = (offset & 0x800) << (12 - 11);
        u16 imm10 = (offset & 0x400) >> (10 - 8);
        u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
 static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
                                         Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        if (offset != (s32)offset) {
@@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
 static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 hi20;
 
        /* Always emit the got entry */
@@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
                                       Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
 static int apply_r_riscv_call_rela(struct module *me, u32 *location,
                                   Elf_Addr v)
 {
-       s64 offset = (void *)v - (void *)location;
+       ptrdiff_t offset = (void *)v - (void *)location;
        s32 fill_v = offset;
        u32 hi20, lo12;
 
@@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
 static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location += (*(u32 *)v);
+       *(u32 *)location += (u32)v;
        return 0;
 }
 
 static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
                                    Elf_Addr v)
 {
-       *(u32 *)location -= (*(u32 *)v);
+       *(u32 *)location -= (u32)v;
        return 0;
 }
 
@@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                        unsigned int j;
 
                        for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
-                               u64 hi20_loc =
+                               unsigned long hi20_loc =
                                        sechdrs[sechdrs[relsec].sh_info].sh_addr
                                        + rel[j].r_offset;
                                u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
@@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                                        Elf_Sym *hi20_sym =
                                                (Elf_Sym *)sechdrs[symindex].sh_addr
                                                + ELF_RISCV_R_SYM(rel[j].r_info);
-                                       u64 hi20_sym_val =
+                                       unsigned long hi20_sym_val =
                                                hi20_sym->st_value
                                                + rel[j].r_addend;
 
                                        /* Calculate lo12 */
-                                       u64 offset = hi20_sym_val - hi20_loc;
+                                       size_t offset = hi20_sym_val - hi20_loc;
                                        if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
                                            && hi20_type == R_RISCV_GOT_HI20) {
                                                offset = module_emit_got_entry(
index ba3e80712797c8ece03b07930f2ffb4b370588cc..9f82a7e34c648a370ec42f2e0bad711058e9baf2 100644 (file)
@@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
        struct pt_regs *regs;
 
        regs = task_pt_regs(target);
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
        return ret;
 }
 
index ee44a48faf79dfd8e01cc07db341cacbeb9feed6..f0d2070866d49b170da74ae0e20e779f55a02199 100644 (file)
@@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p)
        riscv_fill_hwcap();
 }
 
-static int __init riscv_device_init(void)
-{
-       return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-subsys_initcall_sync(riscv_device_init);
index c77df8142be2eaa9525130b3cbea70e191a20aba..58a522f9bcc319ae5d40a8ae15da5d9021921ebd 100644 (file)
@@ -28,7 +28,9 @@ static void __init zone_sizes_init(void)
 {
        unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
 
+#ifdef CONFIG_ZONE_DMA32
        max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
        free_area_init_nodes(max_zone_pfns);
index d2db8acb1a55480895e38fdf142c3d074610230d..5f0234ec8038eb2d11e93b190f3f35e29f29207b 100644 (file)
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
                goto free_addrs;
        }
        if (bpf_jit_prog(&jit, fp)) {
+               bpf_jit_binary_free(header);
                fp = orig_fp;
                goto free_addrs;
        }
index ff81a576347e5154c10c997717548be69e81bbab..82532c299bb5964a429e81353b9c5f94d9bb5ed2 100644 (file)
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
        skb_queue_head_init(&iadev->rx_dma_q);  
        iadev->rx_free_desc_qhead = NULL;   
 
-       iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+       iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
        if (!iadev->rx_open) {
                printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
                dev->number);  
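
The bug fixed here was a hard-coded element size: kcalloc(n, size, flags) allocates a zeroed array of n elements of size bytes each, with overflow checking on the multiplication, so a table of num_vc pointers needs sizeof(void *) (8 bytes on 64-bit, not 4) per element. The idiom in isolation, with illustrative names:

    /* Zeroed, overflow-checked allocation of a pointer array. */
    void **table = kcalloc(num_entries, sizeof(*table), GFP_KERNEL);

    if (!table)
            return -ENOMEM;
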
index a8d2eb0ceb8d8f78788182f81f8e1e9f9dc8fbbb..2c288d1f42bba0fcdf31ccec72c069bfa60688b9 100644 (file)
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
                                        return -EFAULT;
                                if (pool < 0 || pool > ZATM_LAST_POOL)
                                        return -EINVAL;
+                               pool = array_index_nospec(pool,
+                                                         ZATM_LAST_POOL + 1);
                                if (copy_from_user(&info,
                                    &((struct zatm_pool_req __user *) arg)->info,
                                    sizeof(info))) return -EFAULT;
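
array_index_nospec(), from <linux/nospec.h>, is the standard Spectre-v1 hardening: after the architectural bounds check, it clamps the index so that it cannot reach past the bound even under speculative execution. The general pattern, with illustrative names:

    int idx = value_from_userspace;

    if (idx < 0 || idx >= ARRAY_SIZE(table))
            return -EINVAL;
    /* Keep the CPU from speculating table[idx] out of bounds. */
    idx = array_index_nospec(idx, ARRAY_SIZE(table));
    use(table[idx]);
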
index e3e330f59c2c01216f3e8e90a161c06c73c5e4ec..b3ba9a222550750f9c92a1ea8d1cf23b93e05d12 100644 (file)
@@ -6113,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
                             MLX5_CAP_GEN(mdev, num_vhca_ports));
 
-       if (MLX5_VPORT_MANAGER(mdev) &&
+       if (MLX5_ESWITCH_MANAGER(mdev) &&
            mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
                dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
 
index 29b0cd9ec951ee4603279656e7148ba2a5b8d763..994aed2f9dfff4135170102265523045e893ac0a 100644 (file)
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
                else
                        pr_warn("md: personality for level %s is not loaded!\n",
                                mddev->clevel);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
        spin_unlock(&pers_lock);
        if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
            pers->start_reshape == NULL) {
                /* This personality cannot handle reshaping... */
                module_put(pers->owner);
-               return -EINVAL;
+               err = -EINVAL;
+               goto abort;
        }
 
        if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
                mddev->private = NULL;
                module_put(pers->owner);
                bitmap_destroy(mddev);
-               return err;
+               goto abort;
        }
        if (mddev->queue) {
                bool nonrot = true;
index 478cf446827f469c1d02d6f2918fcb8dd870f893..35bd3a62451b30fec0cca41fcdc687bb7920aa56 100644 (file)
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
                            disk->rdev->saved_raid_disk < 0)
                                conf->fullsync = 1;
                }
+
+               if (disk->replacement &&
+                   !test_bit(In_sync, &disk->replacement->flags) &&
+                   disk->replacement->saved_raid_disk < 0) {
+                       conf->fullsync = 1;
+               }
+
                disk->recovery_disabled = mddev->recovery_disabled - 1;
        }
 
index 40826bba06b6d52c06bef7eb64bea6a719496dd0..fcfab6635f9c6649a64e0b144f55df672d621389 100644 (file)
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
        bpf_prog_array_free(rcdev->raw->progs);
 }
 
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
 {
-       struct bpf_prog *prog;
        struct rc_dev *rcdev;
        int ret;
 
        if (attr->attach_flags)
                return -EINVAL;
 
-       prog = bpf_prog_get_type(attr->attach_bpf_fd,
-                                BPF_PROG_TYPE_LIRC_MODE2);
-       if (IS_ERR(prog))
-               return PTR_ERR(prog);
-
        rcdev = rc_dev_get_from_fd(attr->target_fd);
-       if (IS_ERR(rcdev)) {
-               bpf_prog_put(prog);
+       if (IS_ERR(rcdev))
                return PTR_ERR(rcdev);
-       }
 
        ret = lirc_bpf_attach(rcdev, prog);
-       if (ret)
-               bpf_prog_put(prog);
 
        put_device(&rcdev->dev);
 
index 567ee54504bcd6eba897009259f691b74b77609e..5e5022fa1d047be078be911bc4f6cd0631f04de7 100644 (file)
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct alx_priv *alx = pci_get_drvdata(pdev);
        struct alx_hw *hw = &alx->hw;
+       int err;
 
        alx_reset_phy(hw);
 
        if (!netif_running(alx->dev))
                return 0;
        netif_device_attach(alx->dev);
-       return __alx_open(alx, true);
+
+       rtnl_lock();
+       err = __alx_open(alx, true);
+       rtnl_unlock();
+
+       return err;
 }
 
 static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
index d847e1b9c37b5afff33e799e919e3ff39b5cd1e8..be1506169076f0a89f6a621d01dce81afe720ba7 100644 (file)
@@ -1533,6 +1533,7 @@ struct bnx2x {
        struct link_vars        link_vars;
        u32                     link_cnt;
        struct bnx2x_link_report_data last_reported_link;
+       bool                    force_link_down;
 
        struct mdio_if_info     mdio;
 
index 8cd73ff5debc276aec53d1f056fe3040875b2c0a..af7b5a4d8ba044800b0eb229d8c989c564515e94 100644 (file)
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
 {
        struct bnx2x_link_report_data cur_data;
 
+       if (bp->force_link_down) {
+               bp->link_vars.link_up = 0;
+               return;
+       }
+
        /* reread mf_cfg */
        if (IS_PF(bp) && !CHIP_IS_E1(bp))
                bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
                bp->pending_max = 0;
        }
 
+       bp->force_link_down = false;
        if (bp->port.pmf) {
                rc = bnx2x_initial_phy_init(bp, load_mode);
                if (rc)
index 5b1ed240bf18be0963cc580ab4256b6adc924046..57348f2b49a31fd5b1ef5a67d2ba1e7945768ab0 100644 (file)
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
                bp->sp_rtnl_state = 0;
                smp_mb();
 
+               /* Immediately indicate link as down */
+               bp->link_vars.link_up = 0;
+               bp->force_link_down = true;
+               netif_carrier_off(bp->dev);
+               BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
                bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
                /* When the ret value indicates an allocation failure,
                 * the nic is rebooted again. If open still fails, an error
index 30273a7717e2df797890da57e229ce31e9d957e2..4fd829b5e65d14b56337e63fc480dd72c8420eeb 100644 (file)
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
        id_tbl->max = size;
        id_tbl->next = next;
        spin_lock_init(&id_tbl->lock);
-       id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+       id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
        if (!id_tbl->table)
                return -ENOMEM;
 
index 3e93df5d4e3b2573f88cc427e7eefc6d1930e3ff..96cc03a6d9420f52c7c871f52f32c8e9b8973bbb 100644 (file)
@@ -3726,6 +3726,8 @@ static int at91ether_init(struct platform_device *pdev)
        int err;
        u32 reg;
 
+       bp->queues[0].bp = bp;
+
        dev->netdev_ops = &at91ether_netdev_ops;
        dev->ethtool_ops = &macb_ethtool_ops;
 
index 5f4e1ffa7b95fe4f8d2bb6447764951c51fffc67..ab02057ac7304f088242a2a07481820302d3556b 100644 (file)
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
 /* Default alignment for start of data in an Rx FD */
 #define DPAA_FD_DATA_ALIGNMENT  16
 
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
 /* Values for the L3R field of the FM Parse Results
  */
 /* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
        if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
                nr_frags = skb_shinfo(skb)->nr_frags;
-               dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
-                                sizeof(struct qm_sg_entry) * (1 + nr_frags),
+               dma_unmap_single(dev, addr,
+                                qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
                                 dma_dir);
 
                /* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        void *sgt_buf;
 
        /* get a page frag to store the SGTable */
-       sz = SKB_DATA_ALIGN(priv->tx_headroom +
-               sizeof(struct qm_sg_entry) * (1 + nr_frags));
+       sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
        sgt_buf = netdev_alloc_frag(sz);
        if (unlikely(!sgt_buf)) {
                netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
        skbh = (struct sk_buff **)buffer_start;
        *skbh = skb;
 
-       addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
-                             sizeof(struct qm_sg_entry) * (1 + nr_frags),
-                             dma_dir);
+       addr = dma_map_single(dev, buffer_start,
+                             priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
        if (unlikely(dma_mapping_error(dev, addr))) {
                dev_err(dev, "DMA mapping failed");
                err = -EINVAL;
index ce6e24c74978a22a1d22383f0a5b4f38ffec7c00..ecbf6187e13a1fe3d6dba06015ff9cc49aed6224 100644 (file)
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
 #define HWP_HXS_PHE_REPORT 0x00000800
 #define HWP_HXS_PCAC_PSTAT 0x00000100
 #define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
 struct fman_port_hwp_regs {
        struct {
                u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
                iowrite32be(0xffffffff, &regs->pmda[i].lcv);
        }
 
+       /* Short packet padding removal from checksum calculation */
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+       iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
        start_port_hwp(port);
 }
 
index e2e5cdc7119c3ed0e890f99c7b30996d72d280e9..4c0f7eda1166c5df202c3b9a71cc2e43516531fb 100644 (file)
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
 {
        struct hinic_rq *rq = rxq->rq;
 
+       irq_set_affinity_hint(rq->irq, NULL);
        free_irq(rq->irq, rxq);
        rx_del_napi(rxq);
 }
index ed6dbcfd4e96fe8c59a668c4179fdbe15ced4a80..b151ae316546c2483aa91abfabc900b608e53e4a 100644 (file)
@@ -2199,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
        return true;
 }
 
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS          0
+#define I40E_XDP_CONSUMED      BIT(0)
+#define I40E_XDP_TX            BIT(1)
+#define I40E_XDP_REDIR         BIT(2)
 
 static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
                              struct i40e_ring *xdp_ring);
@@ -2248,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
                break;
        case XDP_REDIRECT:
                err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
-               result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+               result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
                break;
        default:
                bpf_warn_invalid_xdp_action(act);
@@ -2311,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
        struct sk_buff *skb = rx_ring->skb;
        u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-       bool failure = false, xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
+       bool failure = false;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2372,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -I40E_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2427,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & I40E_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & I40E_XDP_TX) {
                struct i40e_ring *xdp_ring =
                        rx_ring->vsi->xdp_rings[rx_ring->queue_index];
 
                i40e_xdp_ring_update_tail(xdp_ring);
-               xdp_do_flush_map();
        }
 
        rx_ring->skb = skb;
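
This i40e change (and the matching ixgbe one below) turns the per-packet XDP verdict from an exclusive return value into bit flags OR-ed together across the NAPI poll, so the TX tail bump and the redirect-map flush each fire exactly when their verdict actually occurred. Schematically, with the per-descriptor loop elided and names mirroring the driver's I40E_XDP_* bits:

    unsigned int xdp_xmit = 0;

    /* inside the descriptor loop: accumulate every verdict seen */
    xdp_xmit |= xdp_res;        /* 0, I40E_XDP_TX, or I40E_XDP_REDIR */

    /* after the loop: act once per verdict kind, not once overall */
    if (xdp_xmit & I40E_XDP_REDIR)
            xdp_do_flush_map();
    if (xdp_xmit & I40E_XDP_TX)
            i40e_xdp_ring_update_tail(xdp_ring);
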
index 3e87dbbc90246dba3a59e3f8ccded5885b441ae2..62e57b05a0aed3d9a02bf8d473aa49505608728f 100644 (file)
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
        return skb;
 }
 
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS         0
+#define IXGBE_XDP_CONSUMED     BIT(0)
+#define IXGBE_XDP_TX           BIT(1)
+#define IXGBE_XDP_REDIR                BIT(2)
 
 static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
                               struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
        case XDP_REDIRECT:
                err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
                if (!err)
-                       result = IXGBE_XDP_TX;
+                       result = IXGBE_XDP_REDIR;
                else
                        result = IXGBE_XDP_CONSUMED;
                break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        unsigned int mss = 0;
 #endif /* IXGBE_FCOE */
        u16 cleaned_count = ixgbe_desc_unused(rx_ring);
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
        struct xdp_buff xdp;
 
        xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
                if (IS_ERR(skb)) {
-                       if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
-                               xdp_xmit = true;
+                       unsigned int xdp_res = -PTR_ERR(skb);
+
+                       if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+                               xdp_xmit |= xdp_res;
                                ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
                        } else {
                                rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_packets++;
        }
 
-       if (xdp_xmit) {
+       if (xdp_xmit & IXGBE_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & IXGBE_XDP_TX) {
                struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
 
                /* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                 */
                wmb();
                writel(ring->next_to_use, ring->tail);
-
-               xdp_do_flush_map();
        }
 
        u64_stats_update_begin(&rx_ring->syncp);
index 487388aed98f22cc9ae814fd60d27b48d5105458..384c1fa490811ee651919c139b9cd9e724d4ff81 100644 (file)
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
        unsigned long flags;
        bool poll_cmd = ent->polling;
        int alloc_ret;
+       int cmd_mode;
 
        sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
        down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
        set_signature(ent, !cmd->checksum_disabled);
        dump_command(dev, ent, 1);
        ent->ts1 = ktime_get_ns();
+       cmd_mode = cmd->mode;
 
        if (ent->callback)
                schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
        iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
        mmiowb();
        /* if not in polling don't use ent after this point */
-       if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+       if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
                poll_timeout(ent);
                /* make sure we read the descriptor after ownership is SW */
                rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
 {
        struct mlx5_core_dev *dev = filp->private_data;
        struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
-       char outlen_str[8];
+       char outlen_str[8] = {0};
        int outlen;
        void *ptr;
        int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
        if (copy_from_user(outlen_str, buf, count))
                return -EFAULT;
 
-       outlen_str[7] = 0;
-
        err = sscanf(outlen_str, "%d", &outlen);
        if (err < 0)
                return err;
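
The outlen_write() fix swaps a single hand-placed terminator for whole-buffer zero-initialization, so that however many bytes copy_from_user() brings in, sscanf() always sees a NUL-terminated string. The pattern in isolation (the surrounding length check is illustrative, not the driver's exact one):

    char buf[8] = {0};  /* zero-fill guarantees NUL termination */

    if (count > sizeof(buf) - 1)
            return -EINVAL;
    if (copy_from_user(buf, user_buf, count))
            return -EFAULT;
    /* buf is now a proper C string for sscanf() et al. */
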
index 56c1b6f5593e053d4629b15635bacf1ece9d6a88..dae4156a710ddc60467999ab56c67b7ff31914db 100644 (file)
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        mlx5e_activate_channels(&priv->channels);
        netif_tx_start_all_queues(priv->netdev);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_add_sqs_fwd_rules(priv);
 
        mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
        mlx5e_redirect_rqts_to_drop(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_remove_sqs_fwd_rules(priv);
 
        /* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
        mlx5e_set_netdev_dev_addr(netdev);
 
 #if IS_ENABLED(CONFIG_MLX5_ESWITCH)
-       if (MLX5_VPORT_MANAGER(mdev))
+       if (MLX5_ESWITCH_MANAGER(mdev))
                netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
        mlx5e_enable_async_events(priv);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_register_vport_reps(priv);
 
        if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
        queue_work(priv->wq, &priv->set_rx_mode_work);
 
-       if (MLX5_VPORT_MANAGER(priv->mdev))
+       if (MLX5_ESWITCH_MANAGER(priv->mdev))
                mlx5e_unregister_vport_reps(priv);
 
        mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
                return NULL;
 
 #ifdef CONFIG_MLX5_ESWITCH
-       if (MLX5_VPORT_MANAGER(mdev)) {
+       if (MLX5_ESWITCH_MANAGER(mdev)) {
                rpriv = mlx5e_alloc_nic_rep_priv(mdev);
                if (!rpriv) {
                        mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
index 57987f6546e8357bdfaeb3e657e0f07fe47d940a..2b8040a3cdbd7c2f74bb854bd8141ba379ea37de 100644 (file)
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;
 
-       if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
                return false;
 
        rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
 static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
 {
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
-       struct mlx5_eswitch_rep *rep = rpriv->rep;
+       struct mlx5_eswitch_rep *rep;
 
+       if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+               return false;
+
+       rep = rpriv->rep;
        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;
 
index f63dfbcd29fea1efc2237d6dcecdbdd74259e1a0..b79d74860a304669eb4e05cf03fa0a267213926a 100644 (file)
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 }
 
 /* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
 
 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
        int err;
        int i, enabled_events;
 
-       if (!ESW_ALLOWED(esw))
-               return 0;
-
-       if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+       if (!ESW_ALLOWED(esw) ||
            !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
                esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
                return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
        u64 node_guid;
        int err = 0;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
                return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 {
        struct mlx5_vport *evport;
 
-       if (!ESW_ALLOWED(esw))
+       if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
                return -EPERM;
        if (!LEGAL_VPORT(esw, vport))
                return -EINVAL;
index cecd201f0b73ab8a42693a79070c21bcc850d6e4..91f1209886ffdbb37af33ac32369f312296f8bfa 100644 (file)
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;
 
-       if (!MLX5_CAP_GEN(dev, vport_group_manager))
-               return -EOPNOTSUPP;
+       if(!MLX5_ESWITCH_MANAGER(dev))
+               return -EPERM;
 
        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;
index 49a75d31185ecf25ff93c5f3a9beec6b48be28a1..f1a86cea86a0e24c5128e50f23bbba8a33112b7d 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mutex.h>
 #include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
 
 #include "mlx5_core.h"
 #include "fs_core.h"
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
                        goto err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
                        err = init_fdb_root_ns(steering);
                        if (err)
index afd9f4fa22f40b70506fafa49f29cf647c22a959..41ad24f0de2cf9d171e586df3b9d167515d3cb03 100644 (file)
@@ -32,6 +32,7 @@
 
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
 #include <linux/module.h>
 #include "mlx5_core.h"
 #include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
        }
 
        if (MLX5_CAP_GEN(dev, vport_group_manager) &&
-           MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+           MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
                if (err)
                        return err;
        }
 
-       if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+       if (MLX5_ESWITCH_MANAGER(dev)) {
                err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
                if (err)
                        return err;
index 7cb67122e8b5f04371651e1c1e2757acb281a36e..98359559c77e4286df95df17651a4b9f2ca8e427 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/etherdevice.h>
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
 #include "mlx5_core.h"
 #include "lib/mpfs.h"
 
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
        int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
        struct mlx5_mpfs *mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_mpfs *mpfs = dev->priv.mpfs;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return;
 
        WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
        u32 index;
        int err;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
        int err = 0;
        u32 index;
 
-       if (!MLX5_VPORT_MANAGER(dev))
+       if (!MLX5_ESWITCH_MANAGER(dev))
                return 0;
 
        mutex_lock(&mpfs->lock);
index fa9d0760dd36ffda5c2c439f12bbdffab6320ccd..31a9cbd85689b01fc0bfe9e6c221d73cc7c5fe13 100644 (file)
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
 static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
                                   int inlen)
 {
-       u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 out[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
 static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
                                     int outlen)
 {
-       u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+       u32 in[MLX5_ST_SZ_DW(qetc_reg)];
 
        if (!MLX5_CAP_GEN(mdev, ets))
                return -EOPNOTSUPP;
index 2a8b529ce6dd176cbc29b9bb4b74cd1d1c48f671..a0674962f02c4d2a35d05c98f84436967703101c 100644 (file)
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return -EBUSY;
        }
 
+       if (!MLX5_ESWITCH_MANAGER(dev))
+               goto enable_vfs_hca;
+
        err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
        if (err) {
                mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
                return err;
        }
 
+enable_vfs_hca:
        for (vf = 0; vf < num_vfs; vf++) {
                err = mlx5_core_enable_hca(dev, vf + 1);
                if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
        }
 
 out:
-       mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+       if (MLX5_ESWITCH_MANAGER(dev))
+               mlx5_eswitch_disable_sriov(dev->priv.eswitch);
 
        if (mlx5_wait_for_vf_pages(dev))
                mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
index 719cecb182c6c4eb5579eb1b36601acb6c0d0c5c..7eecd5b07bb1931bf3041b1ae12b0f3f5154405a 100644 (file)
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                return -EINVAL;
        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return -EACCES;
-       if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
-               return -EOPNOTSUPP;
 
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
index fcdfb8e7fdeab0b9dcb353f4cd4a7d76370c9817..40216d56dddcb73d997ed4e4c48e63868610da89 100644 (file)
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
 
        ret = nfp_net_bpf_offload(nn, prog, running, extack);
        /* Stop offload if replace not possible */
-       if (ret && prog)
-               nfp_bpf_xdp_offload(app, nn, NULL, extack);
+       if (ret)
+               return ret;
 
-       nn->dp.bpf_offload_xdp = prog && !ret;
+       nn->dp.bpf_offload_xdp = !!prog;
        return ret;
 }
 
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index 91935405f5861678077c188328d365ed5cb2ba7f..84f7a5dbea9d5bf17abd88416cc5a41f2fa4770b 100644 (file)
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
                         NFP_FLOWER_MASK_MPLS_Q;
 
                frame->mpls_lse = cpu_to_be32(t_mpls);
+       } else if (dissector_uses_key(flow->dissector,
+                                     FLOW_DISSECTOR_KEY_BASIC)) {
+               /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q
+                * bit, which indicates an mpls ether type but without any
+                * mpls fields.
+                */
+               struct flow_dissector_key_basic *key_basic;
+
+               key_basic = skb_flow_dissector_target(flow->dissector,
+                                                     FLOW_DISSECTOR_KEY_BASIC,
+                                                     flow->key);
+               if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+                   key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+                       frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
        }
 }
 
index c42e64f32333f84640ff913b61ff199701e1b404..525057bee0ed8978f360d6eeb8293d8a990a0f22 100644 (file)
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;
 
+               case cpu_to_be16(ETH_P_MPLS_UC):
+               case cpu_to_be16(ETH_P_MPLS_MC):
+                       if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+                               key_layer |= NFP_FLOWER_LAYER_MAC;
+                               key_size += sizeof(struct nfp_flower_mac_mpls);
+                       }
+                       break;
+
                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;
 
+       if (tcf_block_shared(f->block))
+               return -EOPNOTSUPP;
+
        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
index cd34097b79f1be9d313d8f28b9701bb5bd6a3100..37a6d7822a3860647c416efeff47c7a7837a3a85 100644 (file)
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
        err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
                           nfp_resource_address(state->res),
                           fwinf, sizeof(*fwinf));
-       if (err < sizeof(*fwinf))
+       if (err < (int)sizeof(*fwinf))
                goto err_release;
 
        if (!nffw_res_flg_init_get(fwinf))
index f0b01385d5cb51ac2127ec88460d2231e62df33f..e0680ce9132815568914dff86606363b9a02cb88 100644 (file)
@@ -709,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
        p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
-              ARRAY_SIZE(p_local->local_chassis_id));
+              sizeof(p_local->local_chassis_id));
        memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
-              ARRAY_SIZE(p_local->local_port_id));
+              sizeof(p_local->local_port_id));
 }
 
 static void
@@ -723,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
        p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
 
        memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
-              ARRAY_SIZE(p_remote->peer_chassis_id));
+              sizeof(p_remote->peer_chassis_id));
        memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
-              ARRAY_SIZE(p_remote->peer_port_id));
+              sizeof(p_remote->peer_port_id));
 }
 
 static int
index 329781cda77fbecc88328ea95f00e39d4be5db9b..e5249b4741d03f7c347c70a861288b787653741a 100644 (file)
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
                        DP_INFO(p_hwfn, "Failed to update driver state\n");
 
                rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
-                                              QED_OV_ESWITCH_VEB);
+                                              QED_OV_ESWITCH_NONE);
                if (rc)
                        DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
        }
index 5c10fd7210c37ff658d312a6f57f4f7e8b69b77c..0cbc74d6ca8b4914b01c4e12a253a59dcee21bf8 100644 (file)
@@ -789,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
 
+       if (is_kdump_kernel()) {
+               DP_INFO(cdev,
+                       "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+                       cdev->int_params.in.min_msix_cnt);
+               cdev->int_params.in.num_vectors =
+                       cdev->int_params.in.min_msix_cnt;
+       }
+
        rc = qed_set_int_mode(cdev, false);
        if (rc)  {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
index f01bf52bc381f6f02c33ee3d9df4a90982cf8245..fd59cf45f4be8cb008d8728398870cc1ab41210b 100644 (file)
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
 static int qed_sriov_enable(struct qed_dev *cdev, int num)
 {
        struct qed_iov_vf_init_params params;
+       struct qed_hwfn *hwfn;
+       struct qed_ptt *ptt;
        int i, j, rc;
 
        if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
 
        /* Initialize HW for VF access */
        for_each_hwfn(cdev, j) {
-               struct qed_hwfn *hwfn = &cdev->hwfns[j];
-               struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+               hwfn = &cdev->hwfns[j];
+               ptt = qed_ptt_acquire(hwfn);
 
                /* Make sure not to use more than 16 queues per VF */
                params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
                goto err;
        }
 
+       hwfn = QED_LEADING_HWFN(cdev);
+       ptt = qed_ptt_acquire(hwfn);
+       if (!ptt) {
+               DP_ERR(hwfn, "Failed to acquire ptt\n");
+               rc = -EBUSY;
+               goto err;
+       }
+
+       rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+       if (rc)
+               DP_INFO(cdev, "Failed to update eswitch mode\n");
+       qed_ptt_release(hwfn, ptt);
+
        return num;
 
 err:
index 02adb513f4756cb58c423936213bdcb4158d1dfa..013ff567283c738f342ca5d6f5358e30ca6daa72 100644 (file)
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
 {
        struct qede_ptp *ptp = edev->ptp;
 
-       if (!ptp)
-               return -EIO;
+       if (!ptp) {
+               info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                       SOF_TIMESTAMPING_RX_SOFTWARE |
+                                       SOF_TIMESTAMPING_SOFTWARE;
+               info->phc_index = -1;
+
+               return 0;
+       }
 
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
index 8edf20967c82c583bb59ace5f1f9c30dcfd1530d..e045a5d6b938f43f391a726f301d8911f156b32c 100644 (file)
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
        if (!state)
                return -ENOMEM;
        efx->filter_state = state;
+       init_rwsem(&state->lock);
 
        table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
        table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
index d37f17ca62fecf66a6b5af1c9aa105923310a341..65bc3556bd8f8c25b9b37421c80d6a663d8eb0db 100644 (file)
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
        }
 }
 
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+       u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+       value &= ~DMA_RBSZ_MASK;
+       value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+       writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
 const struct stmmac_dma_ops dwmac4_dma_ops = {
        .reset = dwmac4_dma_reset,
        .init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
 
 const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
        .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
        .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
        .enable_tso = dwmac4_enable_tso,
+       .set_bfsize = dwmac4_set_bfsize,
 };
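
dwmac4_set_bfsize() is a plain read-modify-write of the buffer-size field in the per-channel RX control register. The same field update can also be written with the generic bitfield helpers; a sketch with a made-up register name (only GENMASK, FIELD_PREP and the MMIO accessors are real):

	#include <linux/bitfield.h>
	#include <linux/io.h>

	#define DEMO_RX_BUF_SZ		GENMASK(14, 1)	/* hypothetical field */

	static void demo_set_rx_bufsize(void __iomem *reg, u32 bfsize)
	{
		u32 val = readl(reg);

		val &= ~DEMO_RX_BUF_SZ;			/* clear the field */
		val |= FIELD_PREP(DEMO_RX_BUF_SZ, bfsize);
		writel(val, reg);
	}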
index c63c1fe3f26b9e4d5cb714ea3ceed56bf103b17e..22a4a6dbb1a4af42d3d7467e3ebca50efef57986 100644 (file)
 
 /* DMA Rx Channel X Control register defines */
 #define DMA_CONTROL_SR                 BIT(0)
+#define DMA_RBSZ_MASK                  GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT                 1
 
 /* Interrupt status per channel */
 #define DMA_CHAN_STATUS_REB            GENMASK(21, 19)
index e44e7b26ce829be0eff000c6a68b064139d532b8..fe8b536b13f864bfff723ea2236a3e5982026533 100644 (file)
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
        void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
        void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+       void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
 };
 
 #define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
        stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
 #define stmmac_enable_tso(__priv, __args...) \
        stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+       stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
 
 struct mac_device_info;
 struct net_device;
index cba46b62a1cdd9571c9b82bb35503bc652c2782e..60f59abab009e6fcb077eeccacb545dddad47fa6 100644 (file)
@@ -1804,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 
                stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
                                rxfifosz, qmode);
+               stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+                               chan);
        }
 
        for (chan = 0; chan < tx_channels_count; chan++) {
index 750eaa53bf0ce59429d524ba0658ad6f488a4ba0..ada33c2d9ac20e01af4acec33727623204fda803 100644 (file)
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index 1a924b867b0742b0aa3e5a15f4da3e6885173e74..4b6e308199d270cd455b7df0de20a8458f6b7941 100644 (file)
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
 void netvsc_channel_cb(void *context);
 int netvsc_poll(struct napi_struct *napi, int budget);
 
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
 int rndis_filter_open(struct netvsc_device *nvdev);
 int rndis_filter_close(struct netvsc_device *nvdev);
 struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
index 5d5bd513847fff4ff353e7c58d9967a354d06955..8e9d0ee1572b97120546171f2487240e6e21bc43 100644 (file)
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
                               VM_PKT_DATA_INBAND, 0);
 }
 
+/* Worker to set up sub-channels on initial setup.
+ * The initial hotplug event occurs in softirq context
+ * and can't wait for the channels to come up.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+       struct netvsc_device *nvdev =
+               container_of(w, struct netvsc_device, subchan_work);
+       struct rndis_device *rdev;
+       int i, ret;
+
+       /* Avoid deadlock with device removal already under RTNL */
+       if (!rtnl_trylock()) {
+               schedule_work(w);
+               return;
+       }
+
+       rdev = nvdev->extension;
+       if (rdev) {
+               ret = rndis_set_subchannel(rdev->ndev, nvdev);
+               if (ret == 0) {
+                       netif_device_attach(rdev->ndev);
+               } else {
+                       /* fall back to the primary channel only */
+                       for (i = 1; i < nvdev->num_chn; i++)
+                               netif_napi_del(&nvdev->chan_table[i].napi);
+
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       rtnl_unlock();
+}
+
 static struct netvsc_device *alloc_net_device(void)
 {
        struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
 
        init_completion(&net_device->channel_init_wait);
        init_waitqueue_head(&net_device->subchan_open);
-       INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+       INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
 
        return net_device;
 }
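
The worker above shows a common deadlock-avoidance idiom: device removal may already hold the RTNL lock while flushing this very work item, so the worker must not block on rtnl_lock(). Instead it tries the lock and, on contention, requeues itself. A stripped-down sketch of the idiom:

	#include <linux/rtnetlink.h>
	#include <linux/workqueue.h>

	static void demo_work(struct work_struct *w)
	{
		/* Don't block: the lock holder may be waiting for us. */
		if (!rtnl_trylock()) {
			schedule_work(w);	/* try again later */
			return;
		}

		/* ... RTNL-protected setup goes here ... */

		rtnl_unlock();
	}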
index fe2256bf1d137fea6b76c5e3a564b191e2b5da7c..dd1d6e115145d4c14fb25d1883d1e42614e211a9 100644 (file)
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
        if (IS_ERR(nvdev))
                return PTR_ERR(nvdev);
 
-       /* Note: enable and attach happen when sub-channels setup */
+       if (nvdev->num_chn > 1) {
+               ret = rndis_set_subchannel(ndev, nvdev);
+
+               /* if unavailable, just proceed with one queue */
+               if (ret) {
+                       nvdev->max_chn = 1;
+                       nvdev->num_chn = 1;
+               }
+       }
+
+       /* In either case, the device is now ready */
+       netif_device_attach(ndev);
 
+       /* Note: enable and attach happen when sub-channels are set up */
        netif_carrier_off(ndev);
 
        if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
 
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
 
+       if (nvdev->num_chn > 1)
+               schedule_work(&nvdev->subchan_work);
+
        /* hw_features computed in rndis_netdev_set_hwcaps() */
        net->features = net->hw_features |
                NETIF_F_HIGHDMA | NETIF_F_SG |
index 5428bb26110262fdfb66daaac8463c91e7981d42..9b4e3c3787e5d33ff200a9711b1373b765b6052e 100644 (file)
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
  * This breaks overlap of processing the host message for the
  * new primary channel with the initialization of sub-channels.
  */
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
 {
-       struct netvsc_device *nvdev
-               = container_of(w, struct netvsc_device, subchan_work);
        struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
-       struct net_device_context *ndev_ctx;
-       struct rndis_device *rdev;
-       struct net_device *ndev;
-       struct hv_device *hv_dev;
+       struct net_device_context *ndev_ctx = netdev_priv(ndev);
+       struct hv_device *hv_dev = ndev_ctx->device_ctx;
+       struct rndis_device *rdev = nvdev->extension;
        int i, ret;
 
-       if (!rtnl_trylock()) {
-               schedule_work(w);
-               return;
-       }
-
-       rdev = nvdev->extension;
-       if (!rdev)
-               goto unlock;    /* device was removed */
-
-       ndev = rdev->ndev;
-       ndev_ctx = netdev_priv(ndev);
-       hv_dev = ndev_ctx->device_ctx;
+       ASSERT_RTNL();
 
        memset(init_packet, 0, sizeof(struct nvsp_message));
        init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret) {
                netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
-               goto failed;
+               return ret;
        }
 
        wait_for_completion(&nvdev->channel_init_wait);
        if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
                netdev_err(ndev, "sub channel request failed\n");
-               goto failed;
+               return -EIO;
        }
 
        nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
        for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
                ndev_ctx->tx_table[i] = i % nvdev->num_chn;
 
-       netif_device_attach(ndev);
-       rtnl_unlock();
-       return;
-
-failed:
-       /* fallback to only primary channel */
-       for (i = 1; i < nvdev->num_chn; i++)
-               netif_napi_del(&nvdev->chan_table[i].napi);
-
-       nvdev->max_chn = 1;
-       nvdev->num_chn = 1;
-
-       netif_device_attach(ndev);
-unlock:
-       rtnl_unlock();
+       return 0;
 }
 
 static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,12 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
                netif_napi_add(net, &net_device->chan_table[i].napi,
                               netvsc_poll, NAPI_POLL_WEIGHT);
 
-       if (net_device->num_chn > 1)
-               schedule_work(&net_device->subchan_work);
+       return net_device;
 
 out:
-       /* if unavailable, just proceed with one queue */
-       if (ret) {
-               net_device->max_chn = 1;
-               net_device->num_chn = 1;
-       }
-
-       /* No sub channels, device is ready */
-       if (net_device->num_chn == 1)
-               netif_device_attach(net);
-
-       return net_device;
+       /* setting up multiple channels failed */
+       net_device->max_chn = 1;
+       net_device->num_chn = 1;
 
 err_dev_remv:
        rndis_filter_device_remove(dev, net_device);
index 23c1d66002413d611c6f1939c3e75308e803c106..4a949569ec4c51668fe7b795caef7ece5d61854b 100644 (file)
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
 {
        struct ipvl_dev *ipvlan;
        struct net_device *mdev = port->dev;
-       int err = 0;
+       unsigned int flags;
+       int err;
 
        ASSERT_RTNL();
        if (port->mode != nval) {
+               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+                       flags = ipvlan->dev->flags;
+                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags | IFF_NOARP);
+                       } else {
+                               err = dev_change_flags(ipvlan->dev,
+                                                      flags & ~IFF_NOARP);
+                       }
+                       if (unlikely(err))
+                               goto fail;
+               }
                if (nval == IPVLAN_MODE_L3S) {
                        /* New mode is L3S */
                        err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
                                mdev->l3mdev_ops = &ipvl_l3mdev_ops;
                                mdev->priv_flags |= IFF_L3MDEV_MASTER;
                        } else
-                               return err;
+                               goto fail;
                } else if (port->mode == IPVLAN_MODE_L3S) {
                        /* Old mode was L3S */
                        mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
                        ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
                        mdev->l3mdev_ops = NULL;
                }
-               list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
-                       if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
-                               ipvlan->dev->flags |= IFF_NOARP;
-                       else
-                               ipvlan->dev->flags &= ~IFF_NOARP;
-               }
                port->mode = nval;
        }
+       return 0;
+
+fail:
+       /* Undo the flags changes that have been done so far. */
+       list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+               flags = ipvlan->dev->flags;
+               if (port->mode == IPVLAN_MODE_L3 ||
+                   port->mode == IPVLAN_MODE_L3S)
+                       dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+               else
+                       dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+       }
+
        return err;
 }
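
The fail path above uses list_for_each_entry_continue_reverse() so that only the slaves whose flags were already changed are unwound, newest first. The general shape of that apply-or-roll-back pattern, with hypothetical demo_apply()/demo_undo() helpers:

	#include <linux/list.h>

	struct demo_member {
		struct list_head node;
	};

	static int demo_apply(struct demo_member *m);
	static void demo_undo(struct demo_member *m);

	static int demo_set_all(struct list_head *head)
	{
		struct demo_member *m;
		int err;

		list_for_each_entry(m, head, node) {
			err = demo_apply(m);
			if (err)
				goto fail;
		}
		return 0;

	fail:
		/* Unwind only the entries already changed, newest first. */
		list_for_each_entry_continue_reverse(m, head, node)
			demo_undo(m);
		return err;
	}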
 
index 081d99aa39853097e7d486e813f344fb895598aa..49ac678eb2dc7ca6539794b9ace40ba86aaa8d6a 100644 (file)
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
                if (err < 0)
                        return err;
 
-               err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+               err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
        }
 
        return err;
index 8dff87ec6d99c5dca122dcdb5d3697157564cfa2..2e4130746c40caf6a2a5dbf51fd0ece84fb6da85 100644 (file)
@@ -64,6 +64,7 @@
 #define DEFAULT_RX_CSUM_ENABLE         (true)
 #define DEFAULT_TSO_CSUM_ENABLE                (true)
 #define DEFAULT_VLAN_FILTER_ENABLE     (true)
+#define DEFAULT_VLAN_RX_OFFLOAD                (true)
 #define TX_OVERHEAD                    (8)
 #define RXW_PADDING                    2
 
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
        if ((ll_mtu % dev->maxpacket) == 0)
                return -EDOM;
 
-       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
 
        netdev->mtu = new_mtu;
 
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
        }
 
        if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+       else
+               pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
                pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
        else
                pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
        buf |= FCT_TX_CTL_EN_;
        ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
 
-       ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+       ret = lan78xx_set_rx_max_frame_length(dev,
+                                             dev->net->mtu + VLAN_ETH_HLEN);
 
        ret = lan78xx_read_reg(dev, MAC_RX, &buf);
        buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
        if (DEFAULT_TSO_CSUM_ENABLE)
                dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
 
+       if (DEFAULT_VLAN_RX_OFFLOAD)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+       if (DEFAULT_VLAN_FILTER_ENABLE)
+               dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
        dev->net->hw_features = dev->net->features;
 
        ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
                                    struct sk_buff *skb,
                                    u32 rx_cmd_a, u32 rx_cmd_b)
 {
+       /* HW checksum offload appears to be flawed if used when not stripping
+        * VLAN headers. Fall back to S/W checksums under these conditions.
+        */
        if (!(dev->net->features & NETIF_F_RXCSUM) ||
-           unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+           unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+           ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+            !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
                skb->ip_summed = CHECKSUM_NONE;
        } else {
                skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
        }
 }
 
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+                                   struct sk_buff *skb,
+                                   u32 rx_cmd_a, u32 rx_cmd_b)
+{
+       if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+           (rx_cmd_a & RX_CMD_A_FVTG_))
+               __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+                                      (rx_cmd_b & 0xffff));
+}
+
 static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
 {
        int             status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        if (skb->len == size) {
                                lan78xx_rx_csum_offload(dev, skb,
                                                        rx_cmd_a, rx_cmd_b);
+                               lan78xx_rx_vlan_offload(dev, skb,
+                                                       rx_cmd_a, rx_cmd_b);
 
                                skb_trim(skb, skb->len - 4); /* remove fcs */
                                skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
                        skb_set_tail_pointer(skb2, size);
 
                        lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+                       lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
 
                        skb_trim(skb2, skb2->len - 4); /* remove fcs */
                        skb2->truesize = size + sizeof(struct sk_buff);
index 86f7196f9d91fbf55c791fff88687a43518d66d8..2a58607a6aea809b14e0aa03955cfa099118e607 100644 (file)
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
 #ifdef CONFIG_PM_SLEEP
        unregister_pm_notifier(&tp->pm_notifier);
 #endif
-       napi_disable(&tp->napi);
+       if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+               napi_disable(&tp->napi);
        clear_bit(WORK_ENABLE, &tp->flags);
        usb_kill_urb(tp->intr_urb);
        cancel_delayed_work_sync(&tp->schedule);
index b6c9a2af37328d1037c3b0ba761256092556167e..53085c63277b4ecfa9d8651543bfb5e545fa73ee 100644 (file)
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
 /* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
 #define VIRTIO_XDP_HEADROOM 256
 
+/* Flags separating the two types of XDP xmit */
+#define VIRTIO_XDP_TX          BIT(0)
+#define VIRTIO_XDP_REDIR       BIT(1)
+
 /* RX packet size EWMA. The average packet size is used to determine the packet
  * buffer size when refilling RX rings. As the entire RX ring may be refilled
  * at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                     struct receive_queue *rq,
                                     void *buf, void *ctx,
                                     unsigned int len,
-                                    bool *xdp_xmit)
+                                    unsigned int *xdp_xmit)
 {
        struct sk_buff *skb;
        struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
                                trace_xdp_exception(vi->dev, xdp_prog, act);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        rcu_read_unlock();
                        goto xdp_xmit;
                case XDP_REDIRECT:
                        err = xdp_do_redirect(dev, &xdp, xdp_prog);
                        if (err)
                                goto err_xdp;
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        rcu_read_unlock();
                        goto xdp_xmit;
                default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                         void *buf,
                                         void *ctx,
                                         unsigned int len,
-                                        bool *xdp_xmit)
+                                        unsigned int *xdp_xmit)
 {
        struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
        u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_TX;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                                        put_page(xdp_page);
                                goto err_xdp;
                        }
-                       *xdp_xmit = true;
+                       *xdp_xmit |= VIRTIO_XDP_REDIR;
                        if (unlikely(xdp_page != page))
                                put_page(page);
                        rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
 }
 
 static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
-                      void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+                      void *buf, unsigned int len, void **ctx,
+                      unsigned int *xdp_xmit)
 {
        struct net_device *dev = vi->dev;
        struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
        }
 }
 
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+                          unsigned int *xdp_xmit)
 {
        struct virtnet_info *vi = rq->vq->vdev->priv;
        unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        struct virtnet_info *vi = rq->vq->vdev->priv;
        struct send_queue *sq;
        unsigned int received, qp;
-       bool xdp_xmit = false;
+       unsigned int xdp_xmit = 0;
 
        virtnet_poll_cleantx(rq);
 
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
        if (received < budget)
                virtqueue_napi_complete(napi, rq->vq, received);
 
-       if (xdp_xmit) {
+       if (xdp_xmit & VIRTIO_XDP_REDIR)
+               xdp_do_flush_map();
+
+       if (xdp_xmit & VIRTIO_XDP_TX) {
                qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
                     smp_processor_id();
                sq = &vi->sq[qp];
                virtqueue_kick(sq->vq);
-               xdp_do_flush_map();
        }
 
        return received;
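
Switching xdp_xmit from a bool to a flag word lets the NAPI poll settle each action exactly once per poll: flush the redirect maps only if any packet was redirected, and kick the TX virtqueue only if this device queued XDP transmissions. Schematically (sq as in the hunk above):

	unsigned int xdp_xmit = 0;

	/* RX loop: receive_buf() ORs in VIRTIO_XDP_TX / VIRTIO_XDP_REDIR */

	if (xdp_xmit & VIRTIO_XDP_REDIR)
		xdp_do_flush_map();		/* settle redirects once */
	if (xdp_xmit & VIRTIO_XDP_TX)
		virtqueue_kick(sq->vq);		/* one doorbell per poll */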
index aee0e60471f10d59c39ad39f8170eedea722455d..f6bb1d54d4bdec833b4104134d3744a7a11af312 100644 (file)
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
        flush = 0;
 
 out:
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
index 2a5fec55bf60f6f30fd684e1c8c5c9a594aa8e75..a246a618f9a497047e4a81614f38da1eb295ef0b 100644 (file)
@@ -829,6 +829,17 @@ struct qeth_trap_id {
 /*some helper functions*/
 #define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
 
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+                                         unsigned int elements)
+{
+       unsigned int i;
+
+       for (i = 0; i < elements; i++)
+               memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+       buf->element[14].sflags = 0;
+       buf->element[15].sflags = 0;
+}
+
 /**
  * qeth_get_elements_for_range() -     find number of SBALEs to cover range.
  * @start:                             Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
                                                 __u16, __u16,
                                                 enum qeth_prot_versions);
 int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
 netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
 netdev_features_t qeth_features_check(struct sk_buff *skb,
                                      struct net_device *dev,
index 8e1474f1ffacfb22b773b02aa1bff6ff91c61ce9..d01ac29fd986d82b84b7215c5268c37e94aaadaa 100644 (file)
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification);
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-               struct qeth_qdio_out_buffer *buf,
-               enum qeth_qdio_buffer_states newbufstate);
 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
 
 struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        struct qaob *aob;
        struct qeth_qdio_out_buffer *buffer;
        enum iucv_tx_notify notification;
+       unsigned int i;
 
        aob = (struct qaob *) phys_to_virt(phys_aob_addr);
        QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
        qeth_notify_skbs(buffer->q, buffer, notification);
 
        buffer->aob = NULL;
-       qeth_clear_output_buffer(buffer->q, buffer,
-                                QETH_QDIO_BUF_HANDLED_DELAYED);
+       /* Free dangling allocations. The attached skbs are handled by
+        * qeth_cleanup_handled_pending().
+        */
+       for (i = 0;
+            i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+            i++) {
+               if (aob->sba[i] && buffer->is_header[i])
+                       kmem_cache_free(qeth_core_header_cache,
+                                       (void *) aob->sba[i]);
+       }
+       atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
 
-       /* from here on: do not touch buffer anymore */
        qdio_release_aob(aob);
 }
 
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
                        QETH_CARD_TEXT(queue->card, 5, "aob");
                        QETH_CARD_TEXT_(queue->card, 5, "%lx",
                                        virt_to_phys(buffer->aob));
+
+                       /* prepare the queue slot for re-use: */
+                       qeth_scrub_qdio_buffer(buffer->buffer,
+                                              QETH_MAX_BUFFER_ELEMENTS(card));
                        if (qeth_init_qdio_out_buf(queue, bidx)) {
                                QETH_CARD_TEXT(card, 2, "outofbuf");
                                qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
                goto out;
        }
 
-       ccw_device_get_id(CARD_RDEV(card), &id);
+       ccw_device_get_id(CARD_DDEV(card), &id);
        request->resp_buf_len = sizeof(*response);
        request->resp_version = DIAG26C_VERSION2;
        request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
 #define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
                          NETIF_F_IPV6_CSUM)
 /**
- * qeth_recover_features() - Restore device features after recovery
- * @dev:       the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev:       a net_device
  */
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
 {
-       netdev_features_t features = dev->features;
        struct qeth_card *card = dev->ml_priv;
+       netdev_features_t features;
 
+       rtnl_lock();
+       features = dev->features;
        /* force-off any feature that needs an IPA sequence.
         * netdev_update_features() will restart them.
         */
        dev->features &= ~QETH_HW_FEATURES;
        netdev_update_features(dev);
-
-       if (features == dev->features)
-               return;
-       dev_warn(&card->gdev->dev,
-                "Device recovery failed to restore all offload features\n");
+       if (features != dev->features)
+               dev_warn(&card->gdev->dev,
+                        "Device recovery failed to restore all offload features\n");
+       rtnl_unlock();
 }
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
 
 int qeth_set_features(struct net_device *dev, netdev_features_t features)
 {
index a7cb37da6a21313eda8d03119135f1475d35f47d..2487f0aeb165c1afae905540d1ff547f7fab4f54 100644 (file)
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
 
 static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
        int rc;
 
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
 
 static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
 {
-       enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+       enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
                                        IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
        int rc;
 
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
                return -ERESTARTSYS;
        }
 
+       /* avoid racing against concurrent state change: */
+       if (!mutex_trylock(&card->conf_mutex))
+               return -EAGAIN;
+
        if (!qeth_card_hw_is_reachable(card)) {
                ether_addr_copy(dev->dev_addr, addr->sa_data);
-               return 0;
+               goto out_unlock;
        }
 
        /* don't register the same address twice */
        if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
            (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
-               return 0;
+               goto out_unlock;
 
        /* add the new address, switch over, drop the old */
        rc = qeth_l2_send_setmac(card, addr->sa_data);
        if (rc)
-               return rc;
+               goto out_unlock;
        ether_addr_copy(old_addr, dev->dev_addr);
        ether_addr_copy(dev->dev_addr, addr->sa_data);
 
        if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
                qeth_l2_remove_mac(card, old_addr);
        card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
-       return 0;
+
+out_unlock:
+       mutex_unlock(&card->conf_mutex);
+       return rc;
 }
 
 static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_off(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                if (recovery_mode &&
                    card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                }
                /* this also sets saved unicast addresses */
                qeth_l2_set_rx_mode(card->dev);
-               rtnl_lock();
-               qeth_recover_features(card->dev);
-               rtnl_unlock();
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
index e7fa479adf47e0dd41bfacaed8fd347bc40b5581..5905dc63e2569baf761611ad25bf3b91786a3235 100644 (file)
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                netif_carrier_on(card->dev);
        else
                netif_carrier_off(card->dev);
+
+       qeth_enable_hw_features(card->dev);
        if (recover_flag == CARD_STATE_RECOVER) {
                rtnl_lock();
                if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
                else
                        dev_open(card->dev);
                qeth_l3_set_rx_mode(card->dev);
-               qeth_recover_features(card->dev);
                rtnl_unlock();
        }
        qeth_trace_features(card);
index 123bf7d516fc1f475cb89edb8aade4c2ad556f51..594d192b23317d7e69d068b2f124ca6f77de3e07 100644 (file)
@@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
                                         unsigned long reason)
 {
        struct mm_struct *mm = ctx->mm;
-       pte_t *pte;
+       pte_t *ptep, pte;
        bool ret = true;
 
        VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-       pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
-       if (!pte)
+       ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
+
+       if (!ptep)
                goto out;
 
        ret = false;
+       pte = huge_ptep_get(ptep);
 
        /*
         * Lockless access: we're in a wait_event so it's ok if it
         * changes under us.
         */
-       if (huge_pte_none(*pte))
+       if (huge_pte_none(pte))
                ret = true;
-       if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
+       if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
                ret = true;
 out:
        return ret;
index 975fb4cf1bb743ccff5fae92e82df582533c0ff2..79795c5fa7c37d3281fdc006ac701a4cb17c4cf6 100644 (file)
@@ -188,12 +188,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                                                              \
        __ret;                                                                \
 })
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr);
 #else
 
+struct bpf_prog;
 struct cgroup_bpf {};
 static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
 
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype,
+                                        struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+                                        enum bpf_prog_type ptype)
+{
+       return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                                       union bpf_attr __user *uattr)
+{
+       return -EINVAL;
+}
+
 #define cgroup_bpf_enabled (0)
 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
index 7df32a3200f740b0a00a85615938178547bb5690..8827e797ff97d0973ddf1d4217a885cee9bb63ee 100644 (file)
@@ -696,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
 struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 struct sock  *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog);
 #else
 static inline struct sock  *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 {
@@ -714,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map,
 {
        return -EOPNOTSUPP;
 }
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                                     struct bpf_prog *prog)
+{
+       return -EINVAL;
+}
 #endif
 
 #if defined(CONFIG_XDP_SOCKETS)
index 5f8a4283092d0a6960fd663a33832221a9615353..9d9ff755ec2972cf6e46d1905e1a5caae9dd5ae6 100644 (file)
@@ -5,11 +5,12 @@
 #include <uapi/linux/bpf.h>
 
 #ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
 int lirc_prog_detach(const union bpf_attr *attr);
 int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
 #else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+                                  struct bpf_prog *prog)
 {
        return -EINVAL;
 }
index 20f2659dd829256d7fef206087ab3262e1e291f5..300baad62c889100722b1778a7c4819a16fbe2d1 100644 (file)
@@ -470,9 +470,7 @@ struct sock_fprog_kern {
 };
 
 struct bpf_binary_header {
-       u16 pages;
-       u16 locked:1;
-
+       u32 pages;
        /* Some arches need word alignment for their instructions */
        u8 image[] __aligned(4);
 };
@@ -481,7 +479,7 @@ struct bpf_prog {
        u16                     pages;          /* Number of allocated pages */
        u16                     jited:1,        /* Is our filter JIT'ed? */
                                jit_requested:1,/* archs need to JIT the prog */
-                               locked:1,       /* Program image locked? */
+                               undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
                                gpl_compatible:1, /* Is filter GPL compatible? */
                                cb_access:1,    /* Is control block accessed? */
                                dst_needed:1,   /* Do we need dst entry? */
@@ -677,46 +675,24 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 
 static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-       fp->locked = 1;
-       if (set_memory_ro((unsigned long)fp, fp->pages))
-               fp->locked = 0;
-#endif
+       fp->undo_set_mem = 1;
+       set_memory_ro((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-       if (fp->locked) {
-               WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
-               /* In case set_memory_rw() fails, we want to be the first
-                * to crash here instead of some random place later on.
-                */
-               fp->locked = 0;
-       }
-#endif
+       if (fp->undo_set_mem)
+               set_memory_rw((unsigned long)fp, fp->pages);
 }
 
 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-       hdr->locked = 1;
-       if (set_memory_ro((unsigned long)hdr, hdr->pages))
-               hdr->locked = 0;
-#endif
+       set_memory_ro((unsigned long)hdr, hdr->pages);
 }
 
 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
 {
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-       if (hdr->locked) {
-               WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-               /* In case set_memory_rw() fails, we want to be the first
-                * to crash here instead of some random place later on.
-                */
-               hdr->locked = 0;
-       }
-#endif
+       set_memory_rw((unsigned long)hdr, hdr->pages);
 }
 
 static inline struct bpf_binary_header *
@@ -728,22 +704,6 @@ bpf_jit_binary_hdr(const struct bpf_prog *fp)
        return (void *)addr;
 }
 
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline int bpf_prog_check_pages_ro_single(const struct bpf_prog *fp)
-{
-       if (!fp->locked)
-               return -ENOLCK;
-       if (fp->jited) {
-               const struct bpf_binary_header *hdr = bpf_jit_binary_hdr(fp);
-
-               if (!hdr->locked)
-                       return -ENOLCK;
-       }
-
-       return 0;
-}
-#endif
-
 int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
 static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 {
index d3c9db492b30065750726992ba1001c48153232b..fab5121ffb8f5de2b5f39b6a0a7e43cca4b047e0 100644 (file)
@@ -8,6 +8,8 @@
 
 #include <linux/mlx5/driver.h>
 
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
 enum {
        SRIOV_NONE,
        SRIOV_LEGACY,
index 27134c4fcb76eb5140ff4828066e73e11d671cd9..ac281f5ec9b8077ba859f33eaf61e3f03ecdeb3d 100644 (file)
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8         vnic_env_queue_counters[0x1];
        u8         ets[0x1];
        u8         nic_flow_table[0x1];
-       u8         eswitch_flow_table[0x1];
+       u8         eswitch_manager[0x1];
        u8         device_memory[0x1];
        u8         mcam_reg[0x1];
        u8         pcam_reg[0x1];
index 3ec9850c7936f01c0f7564dbe519e95ce0849639..3d0cc0b5cec2d7514dbebf32effab9b1e6388c3c 100644 (file)
@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
        if (PTR_ERR(pp) != -EINPROGRESS)
                NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       if (PTR_ERR(pp) != -EINPROGRESS) {
+               NAPI_GRO_CB(skb)->flush |= flush;
+               skb_gro_remcsum_cleanup(skb, grc);
+               skb->remcsum_offload = 0;
+       }
+}
 #else
 static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
 {
        NAPI_GRO_CB(skb)->flush |= flush;
 }
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+                                              struct sk_buff **pp,
+                                              int flush,
+                                              struct gro_remcsum *grc)
+{
+       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_remcsum_cleanup(skb, grc);
+       skb->remcsum_offload = 0;
+}
 #endif
 
 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
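
In both helpers the PTR_ERR(pp) != -EINPROGRESS test matters: -EINPROGRESS means a lower layer still owns the skb and will complete GRO later, so neither the flush flag nor the remote-checksum state may be finalized yet. Callers such as the vxlan and geneve hunks earlier in this diff shrink to a single tail call; a schematic caller (protocol-specific matching elided):

	static struct sk_buff **demo_gro_receive(struct sock *sk,
						 struct sk_buff **head,
						 struct sk_buff *skb)
	{
		struct sk_buff **pp = NULL;
		struct gro_remcsum grc;
		int flush = 1;

		skb_gro_remcsum_init(&grc);

		/* ... protocol-specific matching sets pp, flush and grc ... */

		skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
		return pp;
	}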
index 47e35cce3b648d696b127ed7bd643036128795f6..a71264d75d7f98d28f92dfd861ffe6e0d39c0198 100644 (file)
@@ -128,6 +128,7 @@ struct net {
 #endif
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        struct netns_nf_frag    nf_frag;
+       struct ctl_table_header *nf_frag_frags_hdr;
 #endif
        struct sock             *nfnl;
        struct sock             *nfnl_stash;
index c978a31b0f846210b4c2a369af960d5349b5395a..762ac9931b6251152b6ee0e5780df0f7b073f3e6 100644 (file)
@@ -109,7 +109,6 @@ struct netns_ipv6 {
 
 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
 struct netns_nf_frag {
-       struct netns_sysctl_ipv6 sysctl;
        struct netns_frags      frags;
 };
 #endif
index a3c1a2c47cd4bfd868004548cdf1ef7a361fa4c6..20b059574e600e64838b0bdecfaf6a76e6629d4a 100644 (file)
@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 {
 }
 
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+       return false;
+}
+
 static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
 {
        return NULL;
index 59b19b6a40d73ea6575f8810a6f4345a931c5a01..b7db3261c62d124760e98d9c851c1b01e64bdb03 100644 (file)
@@ -1857,7 +1857,8 @@ union bpf_attr {
  *             is resolved), the nexthop address is returned in ipv4_dst
  *             or ipv6_dst based on family, smac is set to mac address of
  *             egress device, dmac is set to nexthop mac address, rt_metric
- *             is set to metric from route (IPv4/IPv6 only).
+ *             is set to metric from route (IPv4/IPv6 only), and ifindex
+ *             is set to the device index of the nexthop from the FIB lookup.
  *
  *             *plen* argument is the size of the passed in struct.
  *             *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
  *             *ctx* is either **struct xdp_md** for XDP programs or
  *             **struct sk_buff** tc cls_act programs.
  *     Return
- *             Egress device index on success, 0 if packet needs to continue
- *             up the stack for further processing or a negative error in case
- *             of failure.
+ *             * < 0 if any input argument is invalid
+ *             *   0 on success (packet is forwarded, nexthop neighbor exists)
+ *             * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ *             *     packet is not forwarded or needs assist from full stack
  *
  * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
  *     Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
 #define BPF_FIB_LOOKUP_DIRECT  BIT(0)
 #define BPF_FIB_LOOKUP_OUTPUT  BIT(1)
 
+enum {
+       BPF_FIB_LKUP_RET_SUCCESS,      /* lookup successful */
+       BPF_FIB_LKUP_RET_BLACKHOLE,    /* dest is blackholed; can be dropped */
+       BPF_FIB_LKUP_RET_UNREACHABLE,  /* dest is unreachable; can be dropped */
+       BPF_FIB_LKUP_RET_PROHIBIT,     /* dest not allowed; can be dropped */
+       BPF_FIB_LKUP_RET_NOT_FWDED,    /* packet is not forwarded */
+       BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+       BPF_FIB_LKUP_RET_UNSUPP_LWT,   /* fwd requires encapsulation */
+       BPF_FIB_LKUP_RET_NO_NEIGH,     /* no neighbor entry for nh */
+       BPF_FIB_LKUP_RET_FRAG_NEEDED,  /* fragmentation required to fwd */
+};
+
 struct bpf_fib_lookup {
        /* input:  network family for lookup (AF_INET, AF_INET6)
         * output: network family of egress nexthop
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {
 
        /* total length of packet from network header - used for MTU check */
        __u16   tot_len;
-       __u32   ifindex;  /* L3 device index for lookup */
+
+       /* input: L3 device index for lookup
+        * output: device index from FIB lookup
+        */
+       __u32   ifindex;
 
        union {
                /* inputs to lookup */
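
With the return-value change, bpf_fib_lookup() callers can tell the three outcomes apart: negative for invalid input, BPF_FIB_LKUP_RET_SUCCESS when a usable nexthop was found (fib.ifindex now carries the egress device), and any other positive code when the packet should go up the stack. A schematic XDP consumer (BPF C; header parsing and the MAC rewrite elided):

	struct bpf_fib_lookup fib = {};
	int rc;

	/* ... fill fib from the packet headers ... */

	rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
	if (rc < 0)
		return XDP_ABORTED;	/* invalid arguments */
	if (rc == BPF_FIB_LKUP_RET_SUCCESS) {
		/* rewrite smac/dmac from fib, then forward */
		return bpf_redirect(fib.ifindex, 0);
	}
	return XDP_PASS;		/* let the full stack handle it */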
index f7c00bd6f8e49ca9cc4e6ee323b01f718aebd9ec..3d83ee7df381b1def956b5e645376451d797440e 100644 (file)
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
        return ret;
 }
 
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+                          enum bpf_prog_type ptype, struct bpf_prog *prog)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+                               attr->attach_flags);
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+       struct bpf_prog *prog;
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+       if (IS_ERR(prog))
+               prog = NULL;
+
+       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+       if (prog)
+               bpf_prog_put(prog);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+                         union bpf_attr __user *uattr)
+{
+       struct cgroup *cgrp;
+       int ret;
+
+       cgrp = cgroup_get_from_fd(attr->query.target_fd);
+       if (IS_ERR(cgrp))
+               return PTR_ERR(cgrp);
+
+       ret = cgroup_bpf_query(cgrp, attr, uattr);
+
+       cgroup_put(cgrp);
+       return ret;
+}
+
 /**
  * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
  * @sk: The socket sending or receiving traffic
index a9e6c04d0f4ad5a105aa8aeff99058479131e9e1..1e5625d46414cc68efe372b2c6a8dab266a24dd6 100644 (file)
@@ -598,8 +598,6 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
        bpf_fill_ill_insns(hdr, size);
 
        hdr->pages = size / PAGE_SIZE;
-       hdr->locked = 0;
-
        hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
                     PAGE_SIZE - sizeof(*hdr));
        start = (get_random_int() % hole) & ~(alignment - 1);
@@ -1450,22 +1448,6 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
        return 0;
 }
 
-static int bpf_prog_check_pages_ro_locked(const struct bpf_prog *fp)
-{
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-       int i, err;
-
-       for (i = 0; i < fp->aux->func_cnt; i++) {
-               err = bpf_prog_check_pages_ro_single(fp->aux->func[i]);
-               if (err)
-                       return err;
-       }
-
-       return bpf_prog_check_pages_ro_single(fp);
-#endif
-       return 0;
-}
-
 static void bpf_prog_select_func(struct bpf_prog *fp)
 {
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
@@ -1524,17 +1506,7 @@ finalize:
         * all eBPF JITs might immediately support all features.
         */
        *err = bpf_check_tail_call(fp);
-       if (*err)
-               return fp;
-
-       /* Checkpoint: at this point onwards any cBPF -> eBPF or
-        * native eBPF program is read-only. If we failed to change
-        * the page attributes (e.g. allocation failure from
-        * splitting large pages), then reject the whole program
-        * in order to guarantee not ending up with any W+X pages
-        * from BPF side in kernel.
-        */
-       *err = bpf_prog_check_pages_ro_locked(fp);
+
        return fp;
 }
 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
index 52a91d816c0eb9a1f9fe96fd77b3ffefd6145149..cf7b6a6dbd1f3bc290eb3f740e46a63ab3eba116 100644 (file)
@@ -72,6 +72,7 @@ struct bpf_htab {
        u32 n_buckets;
        u32 elem_size;
        struct bpf_sock_progs progs;
+       struct rcu_head rcu;
 };
 
 struct htab_elem {
@@ -89,8 +90,8 @@ enum smap_psock_state {
 struct smap_psock_map_entry {
        struct list_head list;
        struct sock **entry;
-       struct htab_elem *hash_link;
-       struct bpf_htab *htab;
+       struct htab_elem __rcu *hash_link;
+       struct bpf_htab __rcu *htab;
 };
 
 struct smap_psock {
@@ -120,6 +121,7 @@ struct smap_psock {
        struct bpf_prog *bpf_parse;
        struct bpf_prog *bpf_verdict;
        struct list_head maps;
+       spinlock_t maps_lock;
 
        /* Back reference used when sock callback trigger sockmap operations */
        struct sock *sock;
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
 static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
                            int offset, size_t size, int flags);
+static void bpf_tcp_close(struct sock *sk, long timeout);
 
 static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
 {
@@ -161,7 +164,42 @@ out:
        return !empty;
 }
 
-static struct proto tcp_bpf_proto;
+enum {
+       SOCKMAP_IPV4,
+       SOCKMAP_IPV6,
+       SOCKMAP_NUM_PROTS,
+};
+
+enum {
+       SOCKMAP_BASE,
+       SOCKMAP_TX,
+       SOCKMAP_NUM_CONFIGS,
+};
+
+static struct proto *saved_tcpv6_prot __read_mostly;
+static DEFINE_SPINLOCK(tcpv6_prot_lock);
+static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
+static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
+                        struct proto *base)
+{
+       prot[SOCKMAP_BASE]                      = *base;
+       prot[SOCKMAP_BASE].close                = bpf_tcp_close;
+       prot[SOCKMAP_BASE].recvmsg              = bpf_tcp_recvmsg;
+       prot[SOCKMAP_BASE].stream_memory_read   = bpf_tcp_stream_read;
+
+       prot[SOCKMAP_TX]                        = prot[SOCKMAP_BASE];
+       prot[SOCKMAP_TX].sendmsg                = bpf_tcp_sendmsg;
+       prot[SOCKMAP_TX].sendpage               = bpf_tcp_sendpage;
+}
+
+static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
+{
+       int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
+       int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
+
+       sk->sk_prot = &bpf_tcp_prots[family][conf];
+}
+
 static int bpf_tcp_init(struct sock *sk)
 {
        struct smap_psock *psock;
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk)
        psock->save_close = sk->sk_prot->close;
        psock->sk_proto = sk->sk_prot;
 
-       if (psock->bpf_tx_msg) {
-               tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
-               tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
-               tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
-               tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
+       /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
+       if (sk->sk_family == AF_INET6 &&
+           unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+               spin_lock_bh(&tcpv6_prot_lock);
+               if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+                       build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
+                       smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+               }
+               spin_unlock_bh(&tcpv6_prot_lock);
        }
-
-       sk->sk_prot = &tcp_bpf_proto;
+       update_sk_prot(sk, psock);
        rcu_read_unlock();
        return 0;
 }
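
The IPv6 branch above is a double-checked publish keyed on the tcpv6_prot pointer (which can change when the ipv6 module loads): a lockless acquire-load on the fast path, and a re-check under the lock before the release-store publishes the rebuilt table. The bare pattern, with hypothetical cur/saved/lock/rebuild_from names:

	if (unlikely(cur != smp_load_acquire(&saved))) {
		spin_lock_bh(&lock);
		if (likely(cur != saved)) {
			rebuild_from(cur);		/* hypothetical */
			smp_store_release(&saved, cur);
		}
		spin_unlock_bh(&lock);
	}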
@@ -219,16 +260,54 @@ out:
        rcu_read_unlock();
 }
 
+static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
+                                        u32 hash, void *key, u32 key_size)
+{
+       struct htab_elem *l;
+
+       hlist_for_each_entry_rcu(l, head, hash_node) {
+               if (l->hash == hash && !memcmp(&l->key, key, key_size))
+                       return l;
+       }
+
+       return NULL;
+}
+
+static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+{
+       return &__select_bucket(htab, hash)->head;
+}
+
 static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 {
        atomic_dec(&htab->count);
        kfree_rcu(l, rcu);
 }
 
+static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
+                                                 struct smap_psock *psock)
+{
+       struct smap_psock_map_entry *e;
+
+       spin_lock_bh(&psock->maps_lock);
+       e = list_first_entry_or_null(&psock->maps,
+                                    struct smap_psock_map_entry,
+                                    list);
+       if (e)
+               list_del(&e->list);
+       spin_unlock_bh(&psock->maps_lock);
+       return e;
+}
+
 static void bpf_tcp_close(struct sock *sk, long timeout)
 {
        void (*close_fun)(struct sock *sk, long timeout);
-       struct smap_psock_map_entry *e, *tmp;
+       struct smap_psock_map_entry *e;
        struct sk_msg_buff *md, *mtmp;
        struct smap_psock *psock;
        struct sock *osk;
@@ -247,7 +326,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
         */
        close_fun = psock->save_close;
 
-       write_lock_bh(&sk->sk_callback_lock);
        if (psock->cork) {
                free_start_sg(psock->sock, psock->cork);
                kfree(psock->cork);
@@ -260,20 +338,38 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
                kfree(md);
        }
 
-       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+       e = psock_map_pop(sk, psock);
+       while (e) {
                if (e->entry) {
                        osk = cmpxchg(e->entry, sk, NULL);
                        if (osk == sk) {
-                               list_del(&e->list);
                                smap_release_sock(psock, sk);
                        }
                } else {
-                       hlist_del_rcu(&e->hash_link->hash_node);
-                       smap_release_sock(psock, e->hash_link->sk);
-                       free_htab_elem(e->htab, e->hash_link);
+                       struct htab_elem *link = rcu_dereference(e->hash_link);
+                       struct bpf_htab *htab = rcu_dereference(e->htab);
+                       struct hlist_head *head;
+                       struct htab_elem *l;
+                       struct bucket *b;
+
+                       b = __select_bucket(htab, link->hash);
+                       head = &b->head;
+                       raw_spin_lock_bh(&b->lock);
+                       l = lookup_elem_raw(head,
+                                           link->hash, link->key,
+                                           htab->map.key_size);
+                       /* If another thread already deleted this object, skip
+                        * deletion.  The refcnt on psock may or may not be zero.
+                        */
+                       if (l) {
+                               hlist_del_rcu(&link->hash_node);
+                               smap_release_sock(psock, link->sk);
+                               free_htab_elem(htab, link);
+                       }
+                       raw_spin_unlock_bh(&b->lock);
                }
+               e = psock_map_pop(sk, psock);
        }
-       write_unlock_bh(&sk->sk_callback_lock);
        rcu_read_unlock();
        close_fun(sk, timeout);
 }
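
The cmpxchg() above is what makes the sockmap-slot teardown safe against
concurrent updates: a racing map update may already have replaced the
slot with another socket, in which case this thread must not clear it or
drop a reference it no longer owns. In isolation:

	/* Only the thread whose socket still occupies the slot clears it
	 * and releases the psock reference held by that slot.
	 */
	osk = cmpxchg(e->entry, sk, NULL);
	if (osk == sk)
		smap_release_sock(psock, sk);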
@@ -1111,8 +1207,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
 
 static int bpf_tcp_ulp_register(void)
 {
-       tcp_bpf_proto = tcp_prot;
-       tcp_bpf_proto.close = bpf_tcp_close;
+       build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
        /* Once BPF TX ULP is registered it is never unregistered. It
         * will be in the ULP list for the lifetime of the system. Doing
         * duplicate registers is not a problem.
@@ -1357,7 +1452,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
 {
        if (refcount_dec_and_test(&psock->refcnt)) {
                tcp_cleanup_ulp(sock);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_stop_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
                clear_bit(SMAP_TX_RUNNING, &psock->state);
                rcu_assign_sk_user_data(sock, NULL);
                call_rcu_sched(&psock->rcu, smap_destroy_psock);
@@ -1508,6 +1605,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node)
        INIT_LIST_HEAD(&psock->maps);
        INIT_LIST_HEAD(&psock->ingress);
        refcount_set(&psock->refcnt, 1);
+       spin_lock_init(&psock->maps_lock);
 
        rcu_assign_sk_user_data(sock, psock);
        sock_hold(sock);
@@ -1564,18 +1662,32 @@ free_stab:
        return ERR_PTR(err);
 }
 
-static void smap_list_remove(struct smap_psock *psock,
-                            struct sock **entry,
-                            struct htab_elem *hash_link)
+static void smap_list_map_remove(struct smap_psock *psock,
+                                struct sock **entry)
 {
        struct smap_psock_map_entry *e, *tmp;
 
+       spin_lock_bh(&psock->maps_lock);
        list_for_each_entry_safe(e, tmp, &psock->maps, list) {
-               if (e->entry == entry || e->hash_link == hash_link) {
+               if (e->entry == entry)
                        list_del(&e->list);
-                       break;
-               }
        }
+       spin_unlock_bh(&psock->maps_lock);
+}
+
+static void smap_list_hash_remove(struct smap_psock *psock,
+                                 struct htab_elem *hash_link)
+{
+       struct smap_psock_map_entry *e, *tmp;
+
+       spin_lock_bh(&psock->maps_lock);
+       list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+               struct htab_elem *c = rcu_dereference(e->hash_link);
+
+               if (c == hash_link)
+                       list_del(&e->list);
+       }
+       spin_unlock_bh(&psock->maps_lock);
 }
 
 static void sock_map_free(struct bpf_map *map)
@@ -1601,7 +1713,6 @@ static void sock_map_free(struct bpf_map *map)
                if (!sock)
                        continue;
 
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -1609,10 +1720,9 @@ static void sock_map_free(struct bpf_map *map)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, &stab->sock_map[i], NULL);
+                       smap_list_map_remove(psock, &stab->sock_map[i]);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
        }
        rcu_read_unlock();
 
@@ -1661,17 +1771,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
        if (!sock)
                return -EINVAL;
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
        if (!psock)
                goto out;
 
        if (psock->bpf_parse)
                smap_stop_sock(psock, sock);
-       smap_list_remove(psock, &stab->sock_map[k], NULL);
+       smap_list_map_remove(psock, &stab->sock_map[k]);
        smap_release_sock(psock, sock);
 out:
-       write_unlock_bh(&sock->sk_callback_lock);
        return 0;
 }
 
@@ -1752,7 +1860,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                }
        }
 
-       write_lock_bh(&sock->sk_callback_lock);
        psock = smap_psock_sk(sock);
 
        /* 2. Do not allow inheriting programs if psock exists and has
@@ -1809,7 +1916,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
                if (err)
                        goto out_free;
                smap_init_progs(psock, verdict, parse);
+               write_lock_bh(&sock->sk_callback_lock);
                smap_start_sock(psock, sock);
+               write_unlock_bh(&sock->sk_callback_lock);
        }
 
        /* 4. Place psock in sockmap for use and stop any programs on
@@ -1819,9 +1928,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
         */
        if (map_link) {
                e->entry = map_link;
+               spin_lock_bh(&psock->maps_lock);
                list_add_tail(&e->list, &psock->maps);
+               spin_unlock_bh(&psock->maps_lock);
        }
-       write_unlock_bh(&sock->sk_callback_lock);
        return err;
 out_free:
        smap_release_sock(psock, sock);
@@ -1832,7 +1942,6 @@ out_progs:
        }
        if (tx_msg)
                bpf_prog_put(tx_msg);
-       write_unlock_bh(&sock->sk_callback_lock);
        kfree(e);
        return err;
 }
@@ -1869,10 +1978,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
        if (osock) {
                struct smap_psock *opsock = smap_psock_sk(osock);
 
-               write_lock_bh(&osock->sk_callback_lock);
-               smap_list_remove(opsock, &stab->sock_map[i], NULL);
+               smap_list_map_remove(opsock, &stab->sock_map[i]);
                smap_release_sock(opsock, osock);
-               write_unlock_bh(&osock->sk_callback_lock);
        }
 out:
        return err;
@@ -1915,6 +2022,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
        return 0;
 }
 
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+                       struct bpf_prog *prog)
+{
+       int ufd = attr->target_fd;
+       struct bpf_map *map;
+       struct fd f;
+       int err;
+
+       f = fdget(ufd);
+       map = __bpf_map_get(f);
+       if (IS_ERR(map))
+               return PTR_ERR(map);
+
+       err = sock_map_prog(map, prog, attr->attach_type);
+       fdput(f);
+       return err;
+}
+
 static void *sock_map_lookup(struct bpf_map *map, void *key)
 {
        return NULL;
@@ -2043,14 +2168,13 @@ free_htab:
        return ERR_PTR(err);
 }
 
-static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+static void __bpf_htab_free(struct rcu_head *rcu)
 {
-       return &htab->buckets[hash & (htab->n_buckets - 1)];
-}
+       struct bpf_htab *htab;
 
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
-{
-       return &__select_bucket(htab, hash)->head;
+       htab = container_of(rcu, struct bpf_htab, rcu);
+       bpf_map_area_free(htab->buckets);
+       kfree(htab);
 }
 
 static void sock_hash_free(struct bpf_map *map)
@@ -2069,16 +2193,18 @@ static void sock_hash_free(struct bpf_map *map)
         */
        rcu_read_lock();
        for (i = 0; i < htab->n_buckets; i++) {
-               struct hlist_head *head = select_bucket(htab, i);
+               struct bucket *b = __select_bucket(htab, i);
+               struct hlist_head *head;
                struct hlist_node *n;
                struct htab_elem *l;
 
+               raw_spin_lock_bh(&b->lock);
+               head = &b->head;
                hlist_for_each_entry_safe(l, n, head, hash_node) {
                        struct sock *sock = l->sk;
                        struct smap_psock *psock;
 
                        hlist_del_rcu(&l->hash_node);
-                       write_lock_bh(&sock->sk_callback_lock);
                        psock = smap_psock_sk(sock);
                        /* This check handles a racing sock event that can get
                         * the sk_callback_lock before this case but after xchg
@@ -2086,16 +2212,15 @@ static void sock_hash_free(struct bpf_map *map)
                         * (psock) to be null and queued for garbage collection.
                         */
                        if (likely(psock)) {
-                               smap_list_remove(psock, NULL, l);
+                               smap_list_hash_remove(psock, l);
                                smap_release_sock(psock, sock);
                        }
-                       write_unlock_bh(&sock->sk_callback_lock);
-                       kfree(l);
+                       free_htab_elem(htab, l);
                }
+               raw_spin_unlock_bh(&b->lock);
        }
        rcu_read_unlock();
-       bpf_map_area_free(htab->buckets);
-       kfree(htab);
+       call_rcu(&htab->rcu, __bpf_htab_free);
 }
 
 static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
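
sock_hash_free() can no longer free the bucket array inline, because
bpf_tcp_close() may still be dereferencing e->htab under
rcu_read_lock(); the free is deferred through an rcu_head embedded in
struct bpf_htab. A minimal sketch of the idiom (the struct layout is
assumed from the container_of() call above):

	/* Deferred free via an embedded rcu_head: readers that did
	 * rcu_dereference() inside rcu_read_lock() are guaranteed a full
	 * grace period before the memory disappears.
	 */
	struct bpf_htab_like {
		struct bucket *buckets;
		struct rcu_head rcu;		/* callback linkage */
	};

	static void htab_free_cb(struct rcu_head *rcu)
	{
		struct bpf_htab_like *h;

		h = container_of(rcu, struct bpf_htab_like, rcu);
		bpf_map_area_free(h->buckets);	/* safe: no readers left */
		kfree(h);
	}

	/* replacing the immediate free: call_rcu(&h->rcu, htab_free_cb); */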
@@ -2122,19 +2247,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
        return l_new;
 }
 
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
-                                        u32 hash, void *key, u32 key_size)
-{
-       struct htab_elem *l;
-
-       hlist_for_each_entry_rcu(l, head, hash_node) {
-               if (l->hash == hash && !memcmp(&l->key, key, key_size))
-                       return l;
-       }
-
-       return NULL;
-}
-
 static inline u32 htab_map_hash(const void *key, u32 key_len)
 {
        return jhash(key, key_len, 0);
@@ -2254,9 +2366,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                goto bucket_err;
        }
 
-       e->hash_link = l_new;
-       e->htab = container_of(map, struct bpf_htab, map);
+       rcu_assign_pointer(e->hash_link, l_new);
+       rcu_assign_pointer(e->htab,
+                          container_of(map, struct bpf_htab, map));
+       spin_lock_bh(&psock->maps_lock);
        list_add_tail(&e->list, &psock->maps);
+       spin_unlock_bh(&psock->maps_lock);
 
        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
@@ -2266,7 +2381,7 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
                psock = smap_psock_sk(l_old->sk);
 
                hlist_del_rcu(&l_old->hash_node);
-               smap_list_remove(psock, NULL, l_old);
+               smap_list_hash_remove(psock, l_old);
                smap_release_sock(psock, l_old->sk);
                free_htab_elem(htab, l_old);
        }
@@ -2326,7 +2441,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                struct smap_psock *psock;
 
                hlist_del_rcu(&l->hash_node);
-               write_lock_bh(&sock->sk_callback_lock);
                psock = smap_psock_sk(sock);
                /* This check handles a racing sock event that can get the
                 * sk_callback_lock before this case but after xchg happens
@@ -2334,10 +2448,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
                 * to be null and queued for garbage collection.
                 */
                if (likely(psock)) {
-                       smap_list_remove(psock, NULL, l);
+                       smap_list_hash_remove(psock, l);
                        smap_release_sock(psock, sock);
                }
-               write_unlock_bh(&sock->sk_callback_lock);
                free_htab_elem(htab, l);
                ret = 0;
        }
@@ -2383,6 +2496,7 @@ const struct bpf_map_ops sock_hash_ops = {
        .map_get_next_key = sock_hash_get_next_key,
        .map_update_elem = sock_hash_update_elem,
        .map_delete_elem = sock_hash_delete_elem,
+       .map_release_uref = sock_map_release,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
index 35dc466641f2609cdd13a6f179e81c877144e5c7..d10ecd78105fa66d2ffbd6b27c42fec2a6e6b09c 100644 (file)
@@ -1483,8 +1483,6 @@ out_free_tp:
        return err;
 }
 
-#ifdef CONFIG_CGROUP_BPF
-
 static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
                                             enum bpf_attach_type attach_type)
 {
@@ -1499,40 +1497,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
 
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
-static int sockmap_get_from_fd(const union bpf_attr *attr,
-                              int type, bool attach)
-{
-       struct bpf_prog *prog = NULL;
-       int ufd = attr->target_fd;
-       struct bpf_map *map;
-       struct fd f;
-       int err;
-
-       f = fdget(ufd);
-       map = __bpf_map_get(f);
-       if (IS_ERR(map))
-               return PTR_ERR(map);
-
-       if (attach) {
-               prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
-               if (IS_ERR(prog)) {
-                       fdput(f);
-                       return PTR_ERR(prog);
-               }
-       }
-
-       err = sock_map_prog(map, prog, attr->attach_type);
-       if (err) {
-               fdput(f);
-               if (prog)
-                       bpf_prog_put(prog);
-               return err;
-       }
-
-       fdput(f);
-       return 0;
-}
-
 #define BPF_F_ATTACH_MASK \
        (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
 
@@ -1540,7 +1504,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
        struct bpf_prog *prog;
-       struct cgroup *cgrp;
        int ret;
 
        if (!capable(CAP_NET_ADMIN))
@@ -1577,12 +1540,15 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
+               ptype = BPF_PROG_TYPE_SK_MSG;
+               break;
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
+               ptype = BPF_PROG_TYPE_SK_SKB;
+               break;
        case BPF_LIRC_MODE2:
-               return lirc_prog_attach(attr);
+               ptype = BPF_PROG_TYPE_LIRC_MODE2;
+               break;
        default:
                return -EINVAL;
        }
@@ -1596,18 +1562,20 @@ static int bpf_prog_attach(const union bpf_attr *attr)
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp)) {
-               bpf_prog_put(prog);
-               return PTR_ERR(cgrp);
+       switch (ptype) {
+       case BPF_PROG_TYPE_SK_SKB:
+       case BPF_PROG_TYPE_SK_MSG:
+               ret = sockmap_get_from_fd(attr, ptype, prog);
+               break;
+       case BPF_PROG_TYPE_LIRC_MODE2:
+               ret = lirc_prog_attach(attr, prog);
+               break;
+       default:
+               ret = cgroup_bpf_prog_attach(attr, ptype, prog);
        }
 
-       ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
-                               attr->attach_flags);
        if (ret)
                bpf_prog_put(prog);
-       cgroup_put(cgrp);
-
        return ret;
 }
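
After this refactor all three non-cgroup attach types take the same
prog-first path: the program is loaded from attach_bpf_fd once, then
handed to the sockmap, lirc, or cgroup backend. A hedged userspace
sketch of driving it via the raw syscall (fd values are assumed to come
from earlier BPF_MAP_CREATE/BPF_PROG_LOAD calls):

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Attach prog_fd to map_fd as a stream verdict program. */
	static int attach_sk_skb_verdict(int map_fd, int prog_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.target_fd = map_fd;	/* the sockmap, not a cgroup */
		attr.attach_bpf_fd = prog_fd;
		attr.attach_type = BPF_SK_SKB_STREAM_VERDICT;

		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}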
 
@@ -1616,9 +1584,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 static int bpf_prog_detach(const union bpf_attr *attr)
 {
        enum bpf_prog_type ptype;
-       struct bpf_prog *prog;
-       struct cgroup *cgrp;
-       int ret;
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
@@ -1651,29 +1616,17 @@ static int bpf_prog_detach(const union bpf_attr *attr)
                ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
                break;
        case BPF_SK_MSG_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
        case BPF_SK_SKB_STREAM_PARSER:
        case BPF_SK_SKB_STREAM_VERDICT:
-               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
+               return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
        case BPF_LIRC_MODE2:
                return lirc_prog_detach(attr);
        default:
                return -EINVAL;
        }
 
-       cgrp = cgroup_get_from_fd(attr->target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-
-       prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
-       if (IS_ERR(prog))
-               prog = NULL;
-
-       ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
-       if (prog)
-               bpf_prog_put(prog);
-       cgroup_put(cgrp);
-       return ret;
+       return cgroup_bpf_prog_detach(attr, ptype);
 }
 
 #define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
@@ -1681,9 +1634,6 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 static int bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr)
 {
-       struct cgroup *cgrp;
-       int ret;
-
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
        if (CHECK_ATTR(BPF_PROG_QUERY))
@@ -1711,14 +1661,9 @@ static int bpf_prog_query(const union bpf_attr *attr,
        default:
                return -EINVAL;
        }
-       cgrp = cgroup_get_from_fd(attr->query.target_fd);
-       if (IS_ERR(cgrp))
-               return PTR_ERR(cgrp);
-       ret = cgroup_bpf_query(cgrp, attr, uattr);
-       cgroup_put(cgrp);
-       return ret;
+
+       return cgroup_bpf_prog_query(attr, uattr);
 }
-#endif /* CONFIG_CGROUP_BPF */
 
 #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
 
@@ -2365,7 +2310,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_OBJ_GET:
                err = bpf_obj_get(&attr);
                break;
-#ifdef CONFIG_CGROUP_BPF
        case BPF_PROG_ATTACH:
                err = bpf_prog_attach(&attr);
                break;
@@ -2375,7 +2319,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
        case BPF_PROG_QUERY:
                err = bpf_prog_query(&attr, uattr);
                break;
-#endif
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
index 60aedc87936106460e436fe66429d45a59f36060..08d3d59dca17343c1a91def02d0a7da931c1c0f0 100644 (file)
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = {
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Ctx heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                {
                        {  1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
                        { 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
                },
                .fill_helper = bpf_fill_maxinsns6,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Call heavy transformations",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC | FLAG_NO_DATA,
+#endif
                { },
                { { 1, 0 }, { 10, 0 } },
                .fill_helper = bpf_fill_maxinsns7,
+               .expected_errcode = -ENOTSUPP,
        },
        {       /* Mainly checking JIT here. */
                "BPF_MAXINSNS: Jump heavy test",
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = {
        {
                "BPF_MAXINSNS: exec all MSH",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { 0xfa, 0xfb, 0xfc, 0xfd, },
                { { 4, 0xababab83 } },
                .fill_helper = bpf_fill_maxinsns13,
+               .expected_errcode = -ENOTSUPP,
        },
        {
                "BPF_MAXINSNS: ld_abs+get_processor_id",
                { },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+               CLASSIC | FLAG_EXPECTED_FAIL,
+#else
                CLASSIC,
+#endif
                { },
                { { 1, 0xbee } },
                .fill_helper = bpf_fill_ld_abs_get_processor_id,
+               .expected_errcode = -ENOTSUPP,
        },
        /*
         * LD_IND / LD_ABS on fragmented SKBs
index 56e2d9125ea55a57632feb22c64f2c1a76f5ec6e..38c926520c9718b8929a72829931080a3a53502d 100644 (file)
@@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = {
 
 void __dump_page(struct page *page, const char *reason)
 {
+       bool page_poisoned = PagePoisoned(page);
+       int mapcount;
+
+       /*
+        * If the struct page is poisoned, don't call Page*() functions, as
+        * that leads to a recursive loop: Page*() functions check for
+        * poisoned pages and call dump_page() when one is detected.
+        */
+       if (page_poisoned) {
+               pr_emerg("page:%px is uninitialized and poisoned", page);
+               goto hex_only;
+       }
+
        /*
         * Avoid VM_BUG_ON() in page_mapcount().
         * page->_mapcount space in struct page is used by sl[aou]b pages to
         * encode own info.
         */
-       int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+       mapcount = PageSlab(page) ? 0 : page_mapcount(page);
 
        pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
                  page, page_ref_count(page), mapcount,
@@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason)
 
        pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
 
+hex_only:
        print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
                        sizeof(unsigned long), page,
                        sizeof(struct page), false);
@@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason)
                pr_alert("page dumped because: %s\n", reason);
 
 #ifdef CONFIG_MEMCG
-       if (page->mem_cgroup)
+       if (!page_poisoned && page->mem_cgroup)
                pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
 #endif
 }
index 3612fbb32e9d5412e8494e4c220fad84e3a4e779..039ddbc574e926800f9104ede90782753605f46b 100644 (file)
@@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void)
                 */
                if (hstate_is_gigantic(h))
                        adjust_managed_page_count(page, 1 << h->order);
+               cond_resched();
        }
 }
 
index f185455b34065d27efa2b6a90c9dd2c1dfe92ae9..c3bd5209da380d9a51a0fa4515f4fcdeefcad409 100644 (file)
@@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
 int kasan_module_alloc(void *addr, size_t size)
 {
        void *ret;
+       size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;
 
        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-       shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
-                       PAGE_SIZE);
+       scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+       shadow_size = round_up(scaled_size, PAGE_SIZE);
 
        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;
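
Rounding up before the shift matters whenever the discarded low bits
would have pushed the shadow across a page boundary. A worked example,
assuming the usual 1/8 shadow scale (KASAN_SHADOW_SCALE_SHIFT == 3) and
4 KiB pages:

	/*
	 *   size = 32769 (8 * 4096 + 1)
	 *   old:  round_up(32769 >> 3, 4096)       = round_up(4096, 4096) = 4096
	 *   new:  round_up((32769 + 7) >> 3, 4096) = round_up(4097, 4096) = 8192
	 *
	 * The old form truncated the remainder and under-sized the shadow
	 * mapping by a page for sizes just past an 8-page multiple.
	 */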
index 73a65789271ba9346902dd721b0accd8ce747adc..8ccee3d01822f78184357141ced7a07c3109dc2c 100644 (file)
@@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
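
This hunk, and the matching ones in the fou, gre, and udp GRO paths
below, stop writing to the skb when the lower receive chain returned
ERR_PTR(-EINPROGRESS), i.e. when asynchronous processing already owns
the skb. As an assumption based on how it is called, skb_gro_flush_final()
plausibly has this shape:

	/* Assumed sketch, not the verbatim kernel definition: update the
	 * GRO control block only when pp does not signal that the skb is
	 * being handled asynchronously.
	 */
	#define skb_gro_flush_final(skb, pp, flush)			\
		do {							\
			if (PTR_ERR(pp) != -EINPROGRESS)		\
				NAPI_GRO_CB(skb)->flush |= (flush != 0);\
		} while (0)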
index 13ec0d5415c74486c68f8290689d16d78513e6e9..bdaf53925acd5606fdb953800620bd05cf0f259e 100644 (file)
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS)            += tls/
 obj-$(CONFIG_XFRM)             += xfrm/
 obj-$(CONFIG_UNIX)             += unix/
 obj-$(CONFIG_NET)              += ipv6/
-ifneq ($(CC_CAN_LINK),y)
-$(warning CC cannot link executables. Skipping bpfilter.)
-else
 obj-$(CONFIG_BPFILTER)         += bpfilter/
-endif
 obj-$(CONFIG_PACKET)           += packet/
 obj-$(CONFIG_NET_KEY)          += key/
 obj-$(CONFIG_BRIDGE)           += bridge/
index a948b072c28f36451a587a88bcb6f86c32023693..76deb661588322d9cf8ac6bdd73ba63f5d1416fc 100644 (file)
@@ -1,6 +1,5 @@
 menuconfig BPFILTER
        bool "BPF based packet filtering framework (BPFILTER)"
-       default n
        depends on NET && BPF && INET
        help
          This builds experimental bpfilter framework that is aiming to
@@ -9,6 +8,7 @@ menuconfig BPFILTER
 if BPFILTER
 config BPFILTER_UMH
        tristate "bpfilter kernel module with user mode helper"
+       depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC))
        default m
        help
          This builds bpfilter kernel module with embedded user mode helper
index 051dc18b8ccb5032dae34b5b87e6123674820f36..39c6980b5d9952eed1046f656d8c0a85b4a0d2d6 100644 (file)
@@ -15,20 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y)
 HOSTLDFLAGS += -static
 endif
 
-# a bit of elf magic to convert bpfilter_umh binary into a binary blob
-# inside bpfilter_umh.o elf file referenced by
-# _binary_net_bpfilter_bpfilter_umh_start symbol
-# which bpfilter_kern.c passes further into umh blob loader at run-time
-quiet_cmd_copy_umh = GEN $@
-      cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
-      $(OBJCOPY) -I binary \
-          `LC_ALL=C $(OBJDUMP) -f net/bpfilter/bpfilter_umh \
-          |awk -F' |,' '/file format/{print "-O",$$NF} \
-          /^architecture:/{print "-B",$$2}'` \
-      --rename-section .data=.init.rodata $< $@
-
-$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
-       $(call cmd,copy_umh)
+$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
 
 obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o
-bpfilter-objs += bpfilter_kern.o bpfilter_umh.o
+bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o
index 09522573f611b01ba5fb4d52125e8264d9147f20..f0fc182d3db77eb311d91f7faef4e8a6f85886b3 100644 (file)
 #include <linux/file.h>
 #include "msgfmt.h"
 
-#define UMH_start _binary_net_bpfilter_bpfilter_umh_start
-#define UMH_end _binary_net_bpfilter_bpfilter_umh_end
-
-extern char UMH_start;
-extern char UMH_end;
+extern char bpfilter_umh_start;
+extern char bpfilter_umh_end;
 
 static struct umh_info info;
 /* since ip_getsockopt() can run in parallel, serialize access to umh */
@@ -93,7 +90,9 @@ static int __init load_umh(void)
        int err;
 
        /* fork usermode process */
-       err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info);
+       err = fork_usermode_blob(&bpfilter_umh_start,
+                                &bpfilter_umh_end - &bpfilter_umh_start,
+                                &info);
        if (err)
                return err;
        pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
new file mode 100644 (file)
index 0000000..40311d1
--- /dev/null
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+       .section .init.rodata, "a"
+       .global bpfilter_umh_start
+bpfilter_umh_start:
+       .incbin "net/bpfilter/bpfilter_umh"
+       .global bpfilter_umh_end
+bpfilter_umh_end:
index a04e1e88bf3ab49340d788589c365aaf45d9d3e2..50537ff961a722e18731b7b9671deb739bfce847 100644 (file)
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
                if (ifr->ifr_qlen < 0)
                        return -EINVAL;
                if (dev->tx_queue_len ^ ifr->ifr_qlen) {
-                       unsigned int orig_len = dev->tx_queue_len;
-
-                       dev->tx_queue_len = ifr->ifr_qlen;
-                       err = call_netdevice_notifiers(
-                                       NETDEV_CHANGE_TX_QUEUE_LEN, dev);
-                       err = notifier_to_errno(err);
-                       if (err) {
-                               dev->tx_queue_len = orig_len;
+                       err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
+                       if (err)
                                return err;
-                       }
                }
                return 0;
 
index 126ffc5bc630cb412e4bcf1a48869ec6711fda54..f64aa13811eaeedf8f0040bc9f993ad9e1661eca 100644 (file)
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->mark && r->mark != rule->mark)
                        continue;
 
+               if (rule->suppress_ifgroup != -1 &&
+                   r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (rule->suppress_prefixlen != -1 &&
+                   r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
                if (rule->mark_mask && r->mark_mask != rule->mark_mask)
                        continue;
 
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
                if (rule->ip_proto && r->ip_proto != rule->ip_proto)
                        continue;
 
+               if (rule->proto && r->proto != rule->proto)
+                       continue;
+
                if (fib_rule_port_range_set(&rule->sport_range) &&
                    !fib_rule_port_range_compare(&r->sport_range,
                                                 &rule->sport_range))
@@ -645,6 +656,73 @@ errout:
        return err;
 }
 
+static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
+                      struct nlattr **tb, struct fib_rule *rule)
+{
+       struct fib_rule *r;
+
+       list_for_each_entry(r, &ops->rules_list, list) {
+               if (r->action != rule->action)
+                       continue;
+
+               if (r->table != rule->table)
+                       continue;
+
+               if (r->pref != rule->pref)
+                       continue;
+
+               if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+                       continue;
+
+               if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+                       continue;
+
+               if (r->mark != rule->mark)
+                       continue;
+
+               if (r->suppress_ifgroup != rule->suppress_ifgroup)
+                       continue;
+
+               if (r->suppress_prefixlen != rule->suppress_prefixlen)
+                       continue;
+
+               if (r->mark_mask != rule->mark_mask)
+                       continue;
+
+               if (r->tun_id != rule->tun_id)
+                       continue;
+
+               if (r->fr_net != rule->fr_net)
+                       continue;
+
+               if (r->l3mdev != rule->l3mdev)
+                       continue;
+
+               if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
+                   !uid_eq(r->uid_range.end, rule->uid_range.end))
+                       continue;
+
+               if (r->ip_proto != rule->ip_proto)
+                       continue;
+
+               if (r->proto != rule->proto)
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->sport_range,
+                                                &rule->sport_range))
+                       continue;
+
+               if (!fib_rule_port_range_compare(&r->dport_range,
+                                                &rule->dport_range))
+                       continue;
+
+               if (!ops->compare(r, frh, tb))
+                       continue;
+               return 1;
+       }
+       return 0;
+}
+
 int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                   struct netlink_ext_ack *extack)
 {
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto errout;
 
        if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
-           rule_find(ops, frh, tb, rule, user_priority)) {
+           rule_exists(ops, frh, tb, rule)) {
                err = -EEXIST;
                goto errout_free;
        }
index e7f12e9f598c8edfc684e05e3d290a952aa9cf2f..0ca6907d7efee75b68450f66c2c5a7172272895e 100644 (file)
@@ -4073,8 +4073,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
        memcpy(params->smac, dev->dev_addr, ETH_ALEN);
        params->h_vlan_TCI = 0;
        params->h_vlan_proto = 0;
+       params->ifindex = dev->ifindex;
 
-       return dev->ifindex;
+       return 0;
 }
 #endif
 
@@ -4098,7 +4099,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        /* verify forwarding is enabled on this interface */
        in_dev = __in_dev_get_rcu(dev);
        if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl4.flowi4_iif = 1;
@@ -4123,7 +4124,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = fib_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
        } else {
@@ -4135,8 +4136,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
                err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
        }
 
-       if (err || res.type != RTN_UNICAST)
-               return 0;
+       if (err) {
+               /* map fib lookup errors to RTN_ type */
+               if (err == -EINVAL)
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               if (err == -EHOSTUNREACH)
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               if (err == -EACCES)
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+       }
+
+       if (res.type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (res.fi->fib_nhs > 1)
                fib_select_path(net, &res, &fl4, NULL);
@@ -4144,19 +4157,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        nh = &res.fi->fib_nh[res.nh_sel];
 
        /* do not handle lwt encaps right now */
        if (nh->nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        dev = nh->nh_dev;
-       if (unlikely(!dev))
-               return 0;
-
        if (nh->nh_gw)
                params->ipv4_dst = nh->nh_gw;
 
@@ -4166,10 +4176,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         * rcu_read_lock_bh is not needed here
         */
        neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4190,7 +4200,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        /* link local addresses are never forwarded */
        if (rt6_need_strict(dst) || rt6_need_strict(src))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        dev = dev_get_by_index_rcu(net, params->ifindex);
        if (unlikely(!dev))
@@ -4198,7 +4208,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
        idev = __in6_dev_get_safely(dev);
        if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
-               return 0;
+               return BPF_FIB_LKUP_RET_FWD_DISABLED;
 
        if (flags & BPF_FIB_LOOKUP_OUTPUT) {
                fl6.flowi6_iif = 1;
@@ -4225,7 +4235,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
 
                tb = ipv6_stub->fib6_get_table(net, tbid);
                if (unlikely(!tb))
-                       return 0;
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
 
                f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
        } else {
@@ -4238,11 +4248,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        }
 
        if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
-               return 0;
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
+
+       if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
+               switch (f6i->fib6_type) {
+               case RTN_BLACKHOLE:
+                       return BPF_FIB_LKUP_RET_BLACKHOLE;
+               case RTN_UNREACHABLE:
+                       return BPF_FIB_LKUP_RET_UNREACHABLE;
+               case RTN_PROHIBIT:
+                       return BPF_FIB_LKUP_RET_PROHIBIT;
+               default:
+                       return BPF_FIB_LKUP_RET_NOT_FWDED;
+               }
+       }
 
-       if (unlikely(f6i->fib6_flags & RTF_REJECT ||
-           f6i->fib6_type != RTN_UNICAST))
-               return 0;
+       if (f6i->fib6_type != RTN_UNICAST)
+               return BPF_FIB_LKUP_RET_NOT_FWDED;
 
        if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
                f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
@@ -4252,11 +4274,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
        if (check_mtu) {
                mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
                if (params->tot_len > mtu)
-                       return 0;
+                       return BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
        if (f6i->fib6_nh.nh_lwtstate)
-               return 0;
+               return BPF_FIB_LKUP_RET_UNSUPP_LWT;
 
        if (f6i->fib6_flags & RTF_GATEWAY)
                *dst = f6i->fib6_nh.nh_gw;
@@ -4270,10 +4292,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
         */
        neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
                                      ndisc_hashfn, dst, dev);
-       if (neigh)
-               return bpf_fib_set_fwd_params(params, neigh, dev);
+       if (!neigh)
+               return BPF_FIB_LKUP_RET_NO_NEIGH;
 
-       return 0;
+       return bpf_fib_set_fwd_params(params, neigh, dev);
 }
 #endif
 
@@ -4315,7 +4337,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
           struct bpf_fib_lookup *, params, int, plen, u32, flags)
 {
        struct net *net = dev_net(skb->dev);
-       int index = -EAFNOSUPPORT;
+       int rc = -EAFNOSUPPORT;
 
        if (plen < sizeof(*params))
                return -EINVAL;
@@ -4326,25 +4348,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
        switch (params->family) {
 #if IS_ENABLED(CONFIG_INET)
        case AF_INET:
-               index = bpf_ipv4_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv4_fib_lookup(net, params, flags, false);
                break;
 #endif
 #if IS_ENABLED(CONFIG_IPV6)
        case AF_INET6:
-               index = bpf_ipv6_fib_lookup(net, params, flags, false);
+               rc = bpf_ipv6_fib_lookup(net, params, flags, false);
                break;
 #endif
        }
 
-       if (index > 0) {
+       if (!rc) {
                struct net_device *dev;
 
-               dev = dev_get_by_index_rcu(net, index);
+               dev = dev_get_by_index_rcu(net, params->ifindex);
                if (!is_skb_forwardable(dev, skb))
-                       index = 0;
+                       rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
        }
 
-       return index;
+       return rc;
 }
 
 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
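
With this change bpf_fib_lookup() returns 0 on success (with
params->ifindex filled in) or a positive BPF_FIB_LKUP_RET_* reason,
instead of overloading the return value with an ifindex. An illustrative
XDP-side consumer of the new contract (the redirect/drop policy shown is
an example, not mandated by the helper):

	struct bpf_fib_lookup params = {};
	int rc;

	/* ... fill in family, addresses, tot_len, ifindex ... */
	rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
	if (rc < 0)
		return XDP_ABORTED;		/* bad arguments */
	if (rc == 0)
		return bpf_redirect(params.ifindex, 0);	/* forwardable */
	if (rc == BPF_FIB_LKUP_RET_FRAG_NEEDED)
		return XDP_PASS;	/* let the stack send the ICMP error */
	return XDP_PASS;		/* not forwarded: stack decides */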
index c642304f178ce0a4e1358d59e45032a39f76fb3f..eba8dae22c25b2fe0e49c65392f9c69760f1396f 100644 (file)
@@ -5276,8 +5276,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
                        if (npages >= 1 << order) {
                                page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
                                                   __GFP_COMP |
-                                                  __GFP_NOWARN |
-                                                  __GFP_NORETRY,
+                                                  __GFP_NOWARN,
                                                   order);
                                if (page)
                                        goto fill_page;
index bcc41829a16d50714bdd3c25c976c0b7296fab84..9e8f65585b81152fd9e6c72f7a285a52ef23a686 100644 (file)
@@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot)
 
        rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
                                           rsk_prot->obj_size, 0,
-                                          prot->slab_flags, NULL);
+                                          SLAB_ACCOUNT | prot->slab_flags,
+                                          NULL);
 
        if (!rsk_prot->slab) {
                pr_crit("%s: Can't create request sock SLAB cache!\n",
@@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab)
        if (alloc_slab) {
                prot->slab = kmem_cache_create_usercopy(prot->name,
                                        prot->obj_size, 0,
-                                       SLAB_HWCACHE_ALIGN | prot->slab_flags,
+                                       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
+                                       prot->slab_flags,
                                        prot->useroffset, prot->usersize,
                                        NULL);
 
@@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab)
                                kmem_cache_create(prot->twsk_prot->twsk_slab_name,
                                                  prot->twsk_prot->twsk_obj_size,
                                                  0,
+                                                 SLAB_ACCOUNT |
                                                  prot->slab_flags,
                                                  NULL);
                        if (prot->twsk_prot->twsk_slab == NULL)
index 1540db65241a6fd4d96b00546f13a3e3d3cd1815..c9ec1603666bffcfb24597b933a05f53b6d83440 100644 (file)
@@ -448,9 +448,7 @@ next_proto:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
-       skb_gro_remcsum_cleanup(skb, &grc);
-       skb->remcsum_offload = 0;
+       skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 
        return pp;
 }
index 1859c473b21a862b383edebbcf2c1656f9c58b3b..6a7d980105f60514c8180e6333f0a4a53912c3d5 100644 (file)
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
 
        return pp;
 }
index d06247ba08b2667b1049329e8921af9388545c54..af0a857d8352f2bc94d4f6baa0438a7ac9b2157b 100644 (file)
@@ -265,8 +265,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
            ipv4.sysctl_tcp_fastopen);
        struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
        struct tcp_fastopen_context *ctxt;
-       int ret;
        u32  user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+       __le32 key[4];
+       int ret, i;
 
        tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
        if (!tbl.data)
@@ -275,11 +276,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
        rcu_read_lock();
        ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
        if (ctxt)
-               memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+               memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
        else
-               memset(user_key, 0, sizeof(user_key));
+               memset(key, 0, sizeof(key));
        rcu_read_unlock();
 
+       for (i = 0; i < ARRAY_SIZE(key); i++)
+               user_key[i] = le32_to_cpu(key[i]);
+
        snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
                user_key[0], user_key[1], user_key[2], user_key[3]);
        ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -290,13 +294,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
                        ret = -EINVAL;
                        goto bad_key;
                }
-               tcp_fastopen_reset_cipher(net, NULL, user_key,
+
+               for (i = 0; i < ARRAY_SIZE(user_key); i++)
+                       key[i] = cpu_to_le32(user_key[i]);
+
+               tcp_fastopen_reset_cipher(net, NULL, key,
                                          TCP_FASTOPEN_KEY_LENGTH);
        }
 
 bad_key:
        pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
-              user_key[0], user_key[1], user_key[2], user_key[3],
+               user_key[0], user_key[1], user_key[2], user_key[3],
               (char *)tbl.data, ret);
        kfree(tbl.data);
        return ret;
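
Treating the stored key as __le32 words makes the sysctl text identical
on big- and little-endian hosts. A comment-only illustration with
arbitrary byte values:

	/* Key bytes 01 02 03 04 held in a __le32:
	 *
	 *   le32_to_cpu() on any host -> 0x04030201
	 *   printed with "%08x"       -> "04030201" everywhere
	 *
	 * The old code read the raw word as a host-endian u32, so the same
	 * key printed as "04030201" on little-endian but "01020304" on
	 * big-endian, and a key written on one architecture read back
	 * differently on another.
	 */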
index 355d3dffd021ccad0f30891994289d916f7d276c..8e5522c6833ad5b01c32590f62e2b20131869327 100644 (file)
@@ -265,7 +265,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
                 * it is probably a retransmit.
                 */
                if (tp->ecn_flags & TCP_ECN_SEEN)
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                break;
        case INET_ECN_CE:
                if (tcp_ca_needs_ecn(sk))
@@ -273,7 +273,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
 
                if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
                        /* Better not delay acks, sender can have a very low cwnd */
-                       tcp_enter_quickack_mode(sk, 1);
+                       tcp_enter_quickack_mode(sk, 2);
                        tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
                }
                tp->ecn_flags |= TCP_ECN_SEEN;
@@ -3181,6 +3181,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
 
                if (tcp_is_reno(tp)) {
                        tcp_remove_reno_sacks(sk, pkts_acked);
+
+                       /* If any of the cumulatively ACKed segments was
+                        * retransmitted, the non-SACK case cannot confirm
+                        * that progress was due to the original transmission,
+                        * because TCPCB_SACKED_ACKED bits are unavailable,
+                        * even if some of the packets were never retransmitted.
+                        */
+                       if (flag & FLAG_RETRANS_DATA_ACKED)
+                               flag &= ~FLAG_ORIG_SACK_ACKED;
                } else {
                        int delta;
 
index 92dc9e5a7ff3d0a7509bfa2a66e9189c8341a5fa..69c54540d5b4f2664b78b56468b09e3c1f6ac888 100644 (file)
@@ -394,7 +394,7 @@ unflush:
 out_unlock:
        rcu_read_unlock();
 out:
-       NAPI_GRO_CB(skb)->flush |= flush;
+       skb_gro_flush_final(skb, pp, flush);
        return pp;
 }
 EXPORT_SYMBOL(udp_gro_receive);
index c134286d6a4179516709570ad534d1ae26fd0bce..91580c62bb86bc3fb67661566e9b9e82a2e14e8c 100644 (file)
@@ -4528,6 +4528,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
                               unsigned long expires, u32 flags)
 {
        struct fib6_info *f6i;
+       u32 prio;
 
        f6i = addrconf_get_prefix_route(&ifp->addr,
                                        ifp->prefix_len,
@@ -4536,13 +4537,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
        if (!f6i)
                return -ENOENT;
 
-       if (f6i->fib6_metric != ifp->rt_priority) {
+       prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
+       if (f6i->fib6_metric != prio) {
+               /* delete old one */
+               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
+
                /* add new one */
                addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
                                      ifp->rt_priority, ifp->idev->dev,
                                      expires, flags, GFP_KERNEL);
-               /* delete old one */
-               ip6_del_rt(dev_net(ifp->idev->dev), f6i);
        } else {
                if (!expires)
                        fib6_clean_expires(f6i);
index 5e0332014c1738999e680c1853829f384e880284..a452d99c9f5281b5e5d7e6f0162611deeb82212d 100644 (file)
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
        if (hdr == NULL)
                goto err_reg;
 
-       net->nf_frag.sysctl.frags_hdr = hdr;
+       net->nf_frag_frags_hdr = hdr;
        return 0;
 
 err_reg:
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
 {
        struct ctl_table *table;
 
-       table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
-       unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+       table = net->nf_frag_frags_hdr->ctl_table_arg;
+       unregister_net_sysctl_table(net->nf_frag_frags_hdr);
        if (!net_eq(net, &init_net))
                kfree(table);
 }
index 33fb35cbfac132b1a85cd2c9ce62b4344cbe8afe..558fe8cc6d43858ca828cbd8dc8ea65e63bc6602 100644 (file)
@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
                        return -ENOMEM;
 
                for_each_possible_cpu(cpu) {
-                       tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
+                       tfm = crypto_alloc_shash(algo->name, 0, 0);
                        if (IS_ERR(tfm))
                                return PTR_ERR(tfm);
                        p_tfm = per_cpu_ptr(algo->tfms, cpu);
index 44b5dfe8727d936d39338006bc89b125c848d12b..fa1f1e63a2640fd405e42e5aeae9718b4ef12d2a 100644 (file)
@@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
        skb_reset_network_header(skb);
        skb_reset_mac_header(skb);
 
+       local_bh_disable();
        __ieee80211_subif_start_xmit(skb, skb->dev, flags);
+       local_bh_enable();
 
        return 0;
 }
index d8383609fe2825b707cfb8ebc54381761ccc1108..510039862aa93c99904d2dbd3a7969327d0d896a 100644 (file)
@@ -47,6 +47,8 @@ struct nf_conncount_tuple {
        struct hlist_node               node;
        struct nf_conntrack_tuple       tuple;
        struct nf_conntrack_zone        zone;
+       int                             cpu;
+       u32                             jiffies32;
 };
 
 struct nf_conncount_rb {
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head,
                return false;
        conn->tuple = *tuple;
        conn->zone = *zone;
+       conn->cpu = raw_smp_processor_id();
+       conn->jiffies32 = (u32)jiffies;
        hlist_add_head(&conn->node, head);
        return true;
 }
 EXPORT_SYMBOL_GPL(nf_conncount_add);
 
+static const struct nf_conntrack_tuple_hash *
+find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
+{
+       const struct nf_conntrack_tuple_hash *found;
+       unsigned long a, b;
+       int cpu = raw_smp_processor_id();
+       __s32 age;
+
+       found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
+       if (found)
+               return found;
+       b = conn->jiffies32;
+       a = (u32)jiffies;
+
+       /* conn might have been added just before by another cpu and
+        * might still be unconfirmed.  In this case, nf_conntrack_find_get()
+        * returns no result.  Thus only evict if this cpu added the
+        * stale entry or if the entry is older than two jiffies.
+        */
+       age = a - b;
+       if (conn->cpu == cpu || age >= 2) {
+               hlist_del(&conn->node);
+               kmem_cache_free(conncount_conn_cachep, conn);
+               return ERR_PTR(-ENOENT);
+       }
+
+       return ERR_PTR(-EAGAIN);
+}
+
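
The age test stores jiffies truncated to 32 bits and compares via a
signed difference, which stays correct across wraparound. In numbers:

	/* Wrap-safe age check with 32-bit jiffies snapshots:
	 *
	 *   b = 0xffffffff		(stored just before the wrap)
	 *   a = 0x00000001		(now, just after the wrap)
	 *   age = (__s32)(a - b) = 2	-> two jiffies old, evictable
	 *
	 * A plain magnitude comparison of a and b would misjudge the
	 * wrapped case.
	 */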
 unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
                                 const struct nf_conntrack_tuple *tuple,
                                 const struct nf_conntrack_zone *zone,
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
 {
        const struct nf_conntrack_tuple_hash *found;
        struct nf_conncount_tuple *conn;
-       struct hlist_node *n;
        struct nf_conn *found_ct;
+       struct hlist_node *n;
        unsigned int length = 0;
 
        *addit = tuple ? true : false;
 
        /* check the saved connections */
        hlist_for_each_entry_safe(conn, n, head, node) {
-               found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
-               if (found == NULL) {
-                       hlist_del(&conn->node);
-                       kmem_cache_free(conncount_conn_cachep, conn);
+               found = find_or_evict(net, conn);
+               if (IS_ERR(found)) {
+                       /* Not found, but might be about to be confirmed */
+                       if (PTR_ERR(found) == -EAGAIN) {
+                               length++;
+                               if (!tuple)
+                                       continue;
+
+                               if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
+                                   nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
+                                   nf_ct_zone_id(zone, zone->dir))
+                                       *addit = false;
+                       }
                        continue;
                }
 
index 551a1eddf0fab75eccf803b9711e069e61e60d5d..a75b11c393128d79107fc447c5109b7d0a786ea5 100644 (file)
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
 
        nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
        nf_ct_iterate_destroy(unhelp, me);
+
+       /* Another user may still have grabbed the helper during the unhelp
+        * iteration above, so wait for those RCU readers to finish.
+        */
+       synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
 
index 4264570475788be388e603c1bc70330c812d0eb3..a61d6df6e5f64f5b2086d14f35c88a0491f77ce6 100644 (file)
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
        if (write) {
                struct ctl_table tmp = *table;
 
+               /* proc_dostring() can append to existing strings, so the
+                * buffer must start out as an empty string.
+                */
+               buf[0] = '\0';
                tmp.data = buf;
                r = proc_dostring(&tmp, write, buffer, lenp, ppos);
                if (r)
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
                rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
                mutex_unlock(&nf_log_mutex);
        } else {
+               struct ctl_table tmp = *table;
+
+               tmp.data = buf;
                mutex_lock(&nf_log_mutex);
                logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
                if (!logger)
-                       table->data = "NONE";
+                       strlcpy(buf, "NONE", sizeof(buf));
                else
-                       table->data = logger->name;
-               r = proc_dostring(table, write, buffer, lenp, ppos);
+                       strlcpy(buf, logger->name, sizeof(buf));
                mutex_unlock(&nf_log_mutex);
+               r = proc_dostring(&tmp, write, buffer, lenp, ppos);
        }
 
        return r;
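
Both branches now follow the same pattern: snapshot the logger name into a stack buffer while holding nf_log_mutex, then run proc_dostring() (which may sleep while copying to or from userspace) on a private ctl_table copy after the mutex is dropped. A rough userspace analogue of that snapshot-then-copy-out idiom, with a pthread mutex standing in for nf_log_mutex:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static const char *logger_name;	/* stands in for net->nf.nf_loggers[] */

static void report_logger(void)
{
	char buf[64];

	/* Snapshot the shared string while holding the lock... */
	pthread_mutex_lock(&lock);
	snprintf(buf, sizeof(buf), "%s", logger_name ? logger_name : "NONE");
	pthread_mutex_unlock(&lock);

	/* ...and do the potentially slow output without holding it. */
	printf("%s\n", buf);
}

int main(void)
{
	report_logger();		/* prints "NONE" */
	logger_name = "nf_log_ipv4";
	report_logger();		/* prints "nf_log_ipv4" */
	return 0;
}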
index 4ccd2988f9db637166358335d8e26299c7237bec..ea4ba551abb28cb25c833dc408e23d1313b21bb4 100644 (file)
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
 static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
+       [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+       [NFQA_CFG_MASK]         = { .type = NLA_U32 },
+       [NFQA_CFG_FLAGS]        = { .type = NLA_U32 },
 };
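
Without a policy entry an attribute is accepted at any length, so a buggy or malicious userspace could hand nfnetlink_queue a truncated NFQA_CFG_QUEUE_MAXLEN and have nla_get_u32() read past the payload. The entries above let nla_parse() reject short attributes up front. A hypothetical standalone illustration of the minimum-length check that an { .type = NLA_U32 } entry enforces:

#include <stdint.h>
#include <stdio.h>

/* Mimics the core of NLA_U32 policy validation: the attribute payload
 * must be at least sizeof(u32) bytes before the handler may read it.
 */
static int validate_u32(size_t payload_len)
{
	return payload_len >= sizeof(uint32_t) ? 0 : -1;	/* -EINVAL */
}

int main(void)
{
	printf("2-byte NFQA_CFG_FLAGS: %d\n", validate_u32(2));	/* rejected */
	printf("4-byte NFQA_CFG_FLAGS: %d\n", validate_u32(4));	/* accepted */
	return 0;
}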
 
 static const struct nf_queue_handler nfqh = {
index abef75da89a7450092aefc46ed902e6602fba7a6..cfb05953b0e57afad21fd708f0df42d63c77cd55 100644 (file)
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
 
 int rds_conn_init(void)
 {
+       int ret;
+
+       ret = rds_loop_net_init(); /* register pernet callback */
+       if (ret)
+               return ret;
+
        rds_conn_slab = kmem_cache_create("rds_connection",
                                          sizeof(struct rds_connection),
                                          0, 0, NULL);
-       if (!rds_conn_slab)
+       if (!rds_conn_slab) {
+               rds_loop_net_exit();
                return -ENOMEM;
+       }
 
        rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
        rds_info_register_func(RDS_INFO_SEND_MESSAGES,
@@ -676,6 +684,7 @@ int rds_conn_init(void)
 
 void rds_conn_exit(void)
 {
+       rds_loop_net_exit(); /* unregister pernet callback */
        rds_loop_exit();
 
        WARN_ON(!hlist_empty(rds_conn_hash));
index dac6218a460ed4d4a5b7b03ad4f6056a68784a16..feea1f96ee2ad582dce8f815442da1bbf6e0508a 100644 (file)
@@ -33,6 +33,8 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/in.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
 
 #include "rds_single_path.h"
 #include "rds.h"
 
 static DEFINE_SPINLOCK(loop_conns_lock);
 static LIST_HEAD(loop_conns);
+static atomic_t rds_loop_unloading = ATOMIC_INIT(0);
+
+static void rds_loop_set_unloading(void)
+{
+       atomic_set(&rds_loop_unloading, 1);
+}
+
+static bool rds_loop_is_unloading(struct rds_connection *conn)
+{
+       return atomic_read(&rds_loop_unloading) != 0;
+}
 
 /*
  * This 'loopback' transport is a special case for flows that originate
@@ -165,6 +178,8 @@ void rds_loop_exit(void)
        struct rds_loop_connection *lc, *_lc;
        LIST_HEAD(tmp_list);
 
+       rds_loop_set_unloading();
+       synchronize_rcu();
        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&loop_conns_lock);
        list_splice(&loop_conns, &tmp_list);
@@ -177,6 +192,46 @@ void rds_loop_exit(void)
        }
 }
 
+static void rds_loop_kill_conns(struct net *net)
+{
+       struct rds_loop_connection *lc, *_lc;
+       LIST_HEAD(tmp_list);
+
+       spin_lock_irq(&loop_conns_lock);
+       list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node)  {
+               struct net *c_net = read_pnet(&lc->conn->c_net);
+
+               if (net != c_net)
+                       continue;
+               list_move_tail(&lc->loop_node, &tmp_list);
+       }
+       spin_unlock_irq(&loop_conns_lock);
+
+       list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
+               WARN_ON(lc->conn->c_passive);
+               rds_conn_destroy(lc->conn);
+       }
+}
+
+static void __net_exit rds_loop_exit_net(struct net *net)
+{
+       rds_loop_kill_conns(net);
+}
+
+static struct pernet_operations rds_loop_net_ops = {
+       .exit = rds_loop_exit_net,
+};
+
+int rds_loop_net_init(void)
+{
+       return register_pernet_device(&rds_loop_net_ops);
+}
+
+void rds_loop_net_exit(void)
+{
+       unregister_pernet_device(&rds_loop_net_ops);
+}
+
 /*
  * This is missing .xmit_* because loop doesn't go through generic
  * rds_send_xmit() and doesn't call rds_recv_incoming().  .listen_stop and
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = {
        .inc_free               = rds_loop_inc_free,
        .t_name                 = "loopback",
        .t_type                 = RDS_TRANS_LOOP,
+       .t_unloading            = rds_loop_is_unloading,
 };
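
The new .t_unloading hook lets the RDS core skip a transport that is on its way out: rds_loop_exit() publishes the flag, waits out a grace period with synchronize_rcu(), and only then tears the connections down. A rough userspace analogue of the publish-then-destroy ordering, using C11 atomics in place of the kernel primitives and leaving out the grace period itself:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int unloading;	/* mirrors rds_loop_unloading */

static bool transport_is_unloading(void)
{
	return atomic_load(&unloading) != 0;
}

int main(void)
{
	/* Exit path: publish the flag first; only after every reader is
	 * guaranteed to see it (RCU's job in the kernel) is it safe to
	 * destroy the connection lists.
	 */
	atomic_store(&unloading, 1);
	printf("unloading? %s\n", transport_is_unloading() ? "yes" : "no");
	return 0;
}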
index 469fa4b2da4f38b5fb62358507cb9d9ca62aa825..bbc8cdd030df3137ea250578cb3d429a86fd68f2 100644 (file)
@@ -5,6 +5,8 @@
 /* loop.c */
 extern struct rds_transport rds_loop_transport;
 
+int rds_loop_net_init(void);
+void rds_loop_net_exit(void);
 void rds_loop_exit(void);
 
 #endif
index 973b4471b532b0e64525e94bb2cb181a88b17bbc..3c1405df936cae7015af188a0be7c7d6f4ffd0ee 100644 (file)
@@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending);  /* serialize link group
                                                 */
 
 static void smc_tcp_listen_work(struct work_struct *);
+static void smc_connect_work(struct work_struct *);
 
 static void smc_set_keepalive(struct sock *sk, int val)
 {
@@ -122,6 +123,12 @@ static int smc_release(struct socket *sock)
                goto out;
 
        smc = smc_sk(sk);
+
+       /* cleanup for a dangling non-blocking connect */
+       flush_work(&smc->connect_work);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+
        if (sk->sk_state == SMC_LISTEN)
                /* smc_close_non_accepted() is called and acquires
                 * sock lock for child sockets again
@@ -186,6 +193,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
        sk->sk_protocol = protocol;
        smc = smc_sk(sk);
        INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+       INIT_WORK(&smc->connect_work, smc_connect_work);
        INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
        INIT_LIST_HEAD(&smc->accept_q);
        spin_lock_init(&smc->accept_q_lock);
@@ -576,6 +584,35 @@ static int __smc_connect(struct smc_sock *smc)
        return 0;
 }
 
+static void smc_connect_work(struct work_struct *work)
+{
+       struct smc_sock *smc = container_of(work, struct smc_sock,
+                                           connect_work);
+       int rc;
+
+       lock_sock(&smc->sk);
+       rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
+                           smc->connect_info->alen, smc->connect_info->flags);
+       if (smc->clcsock->sk->sk_err) {
+               smc->sk.sk_err = smc->clcsock->sk->sk_err;
+               goto out;
+       }
+       if (rc < 0) {
+               smc->sk.sk_err = -rc;
+               goto out;
+       }
+
+       rc = __smc_connect(smc);
+       if (rc < 0)
+               smc->sk.sk_err = -rc;
+
+out:
+       smc->sk.sk_state_change(&smc->sk);
+       kfree(smc->connect_info);
+       smc->connect_info = NULL;
+       release_sock(&smc->sk);
+}
+
 static int smc_connect(struct socket *sock, struct sockaddr *addr,
                       int alen, int flags)
 {
@@ -605,15 +642,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
 
        smc_copy_sock_settings_to_clc(smc);
        tcp_sk(smc->clcsock->sk)->syn_smc = 1;
-       rc = kernel_connect(smc->clcsock, addr, alen, flags);
-       if (rc)
-               goto out;
+       if (flags & O_NONBLOCK) {
+               if (smc->connect_info) {
+                       rc = -EALREADY;
+                       goto out;
+               }
+               smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
+               if (!smc->connect_info) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               smc->connect_info->alen = alen;
+               smc->connect_info->flags = flags ^ O_NONBLOCK;
+               memcpy(&smc->connect_info->addr, addr, alen);
+               schedule_work(&smc->connect_work);
+               rc = -EINPROGRESS;
+       } else {
+               rc = kernel_connect(smc->clcsock, addr, alen, flags);
+               if (rc)
+                       goto out;
 
-       rc = __smc_connect(smc);
-       if (rc < 0)
-               goto out;
-       else
-               rc = 0; /* success cases including fallback */
+               rc = __smc_connect(smc);
+               if (rc < 0)
+                       goto out;
+               else
+                       rc = 0; /* success cases including fallback */
+       }
 
 out:
        release_sock(sk);
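
From userspace this gives AF_SMC the conventional non-blocking connect contract: connect() returns -1 with errno EINPROGRESS, and the caller learns the outcome via poll() plus SO_ERROR. A minimal sketch of that calling sequence; AF_INET, a loopback address, and port 7 are stand-ins here so the example runs on any kernel:

#include <errno.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_port = htons(7),		/* hypothetical port */
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	sa.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	fcntl(fd, F_SETFL, O_NONBLOCK);

	if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 &&
	    errno == EINPROGRESS) {
		struct pollfd pfd = { .fd = fd, .events = POLLOUT };
		int err = 0;
		socklen_t len = sizeof(err);

		poll(&pfd, 1, 1000);	/* wait for the connect to resolve */
		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
		printf("connect: %s\n", err ? strerror(err) : "established");
	}
	close(fd);
	return 0;
}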
@@ -1279,40 +1333,20 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
        struct sock *sk = sock->sk;
        __poll_t mask = 0;
        struct smc_sock *smc;
-       int rc;
 
        if (!sk)
                return EPOLLNVAL;
 
        smc = smc_sk(sock->sk);
-       sock_hold(sk);
-       lock_sock(sk);
        if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
                /* delegate to CLC child sock */
-               release_sock(sk);
                mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
-               lock_sock(sk);
                sk->sk_err = smc->clcsock->sk->sk_err;
-               if (sk->sk_err) {
+               if (sk->sk_err)
                        mask |= EPOLLERR;
-               } else {
-                       /* if non-blocking connect finished ... */
-                       if (sk->sk_state == SMC_INIT &&
-                           mask & EPOLLOUT &&
-                           smc->clcsock->sk->sk_state != TCP_CLOSE) {
-                               rc = __smc_connect(smc);
-                               if (rc < 0)
-                                       mask |= EPOLLERR;
-                               /* success cases including fallback */
-                               mask |= EPOLLOUT | EPOLLWRNORM;
-                       }
-               }
        } else {
-               if (sk->sk_state != SMC_CLOSED) {
-                       release_sock(sk);
+               if (sk->sk_state != SMC_CLOSED)
                        sock_poll_wait(file, sk_sleep(sk), wait);
-                       lock_sock(sk);
-               }
                if (sk->sk_err)
                        mask |= EPOLLERR;
                if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1338,10 +1372,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
                }
                if (smc->conn.urg_state == SMC_URG_VALID)
                        mask |= EPOLLPRI;
-
        }
-       release_sock(sk);
-       sock_put(sk);
 
        return mask;
 }
index 51ae1f10d81aa9390e76e392096e3f93c15b65fe..d7ca265704821a1862f84f209550c4b19fc0db59 100644 (file)
@@ -187,11 +187,19 @@ struct smc_connection {
        struct work_struct      close_work;     /* peer sent some closing */
 };
 
+struct smc_connect_info {
+       int                     flags;
+       int                     alen;
+       struct sockaddr         addr;
+};
+
 struct smc_sock {                              /* smc sock container */
        struct sock             sk;
        struct socket           *clcsock;       /* internal tcp socket */
        struct smc_connection   conn;           /* smc connection */
        struct smc_sock         *listen_smc;    /* listen parent */
+       struct smc_connect_info *connect_info;  /* connect address & flags */
+       struct work_struct      connect_work;   /* handle non-blocking connect */
        struct work_struct      tcp_listen_work;/* handle tcp socket accepts */
        struct work_struct      smc_listen_work;/* prepare new accept socket */
        struct list_head        accept_q;       /* sockets to be accepted */
index 373836615c5773de94e1bb47ad0676df886521e2..625acb27efcc272ccdc0f60d4d693d6761ed139b 100644 (file)
@@ -35,7 +35,6 @@ struct _strp_msg {
         */
        struct strp_msg strp;
        int accum_len;
-       int early_eaten;
 };
 
 static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
        head = strp->skb_head;
        if (head) {
                /* Message already in progress */
-
-               stm = _strp_msg(head);
-               if (unlikely(stm->early_eaten)) {
-                       /* Already some number of bytes on the receive sock
-                        * data saved in skb_head, just indicate they
-                        * are consumed.
-                        */
-                       eaten = orig_len <= stm->early_eaten ?
-                               orig_len : stm->early_eaten;
-                       stm->early_eaten -= eaten;
-
-                       return eaten;
-               }
-
                if (unlikely(orig_offset)) {
                        /* Getting data with a non-zero offset when a message is
                         * in progress is not expected. If it does happen, we
@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
                                }
 
                                stm->accum_len += cand_len;
+                               eaten += cand_len;
                                strp->need_bytes = stm->strp.full_len -
                                                       stm->accum_len;
-                               stm->early_eaten = cand_len;
                                STRP_STATS_ADD(strp->stats.bytes, cand_len);
                                desc->count = 0; /* Stop reading socket */
                                break;
index c7bbe5f0aae8839bdfe5ac7b7bd02c6aad8ac8dc..4eece06be1e734c0a0c8de50cdeff2475dd20a59 100644 (file)
@@ -6231,7 +6231,7 @@ do {                                                                          \
                                  nl80211_check_s32);
        /*
         * Check HT operation mode based on
-        * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+        * IEEE 802.11-2016 9.4.2.57 HT Operation element.
         */
        if (tb[NL80211_MESHCONF_HT_OPMODE]) {
                ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
@@ -6241,22 +6241,9 @@ do {                                                                         \
                                  IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
                        return -EINVAL;
 
-               if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
-                   (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                       return -EINVAL;
+               /* NON_HT_STA bit is reserved, but some programs set it */
+               ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
 
-               switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
-               case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
-                       if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
-                               return -EINVAL;
-                       break;
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
-               case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
-                       if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
-                               return -EINVAL;
-                       break;
-               }
                cfg->ht_opmode = ht_opmode;
                mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
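
Rather than rejecting userspace that sets the reserved NON_HT_STA bit, the code now silently clears it before storing the value. A small illustration of that sanitize-then-accept approach; the bit value mirrors the kernel's IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT flag, shown here as a plain constant:

#include <stdint.h>
#include <stdio.h>

#define HT_OP_MODE_NON_HT_STA_PRSNT 0x0010	/* reserved in mesh config */

int main(void)
{
	uint16_t ht_opmode = 0x0013;	/* hypothetical value from userspace */

	/* Clear the reserved bit instead of returning -EINVAL, so older
	 * tools that set it keep working.
	 */
	ht_opmode &= ~HT_OP_MODE_NON_HT_STA_PRSNT;
	printf("sanitized ht_opmode = 0x%04x\n", ht_opmode);	/* 0x0003 */
	return 0;
}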
@@ -10962,9 +10949,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
                                    rem) {
                        u8 *mask_pat;
 
-                       nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                        nl80211_packet_pattern_policy,
-                                        info->extack);
+                       err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                              nl80211_packet_pattern_policy,
+                                              info->extack);
+                       if (err)
+                               goto error;
+
                        err = -EINVAL;
                        if (!pat_tb[NL80211_PKTPAT_MASK] ||
                            !pat_tb[NL80211_PKTPAT_PATTERN])
@@ -11213,8 +11203,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
                            rem) {
                u8 *mask_pat;
 
-               nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
-                                nl80211_packet_pattern_policy, NULL);
+               err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+                                      nl80211_packet_pattern_policy, NULL);
+               if (err)
+                       return err;
+
                if (!pat_tb[NL80211_PKTPAT_MASK] ||
                    !pat_tb[NL80211_PKTPAT_PATTERN])
                        return -EINVAL;
index 6673cdb9f55cab3fb32faaca755f805e8c10ed8f..a7e94e7ff87df5f60f7a57522de77b5929e46029 100644 (file)
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
        struct ethhdr *eth = data;
        struct ipv6hdr *ip6h;
        struct iphdr *iph;
-       int out_index;
        u16 h_proto;
        u64 nh_off;
+       int rc;
 
        nh_off = sizeof(*eth);
        if (data + nh_off > data_end)
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
        fib_params.ifindex = ctx->ingress_ifindex;
 
-       out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
+       rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
 
        /* verify egress index has xdp support
         * TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
         * NOTE: without verification that egress index supports XDP
         *       forwarding packets are dropped.
         */
-       if (out_index > 0) {
+       if (rc == 0) {
                if (h_proto == htons(ETH_P_IP))
                        ip_decrease_ttl(iph);
                else if (h_proto == htons(ETH_P_IPV6))
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
 
                memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
                memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
-               return bpf_redirect_map(&tx_port, out_index, 0);
+               return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
        }
 
        return XDP_PASS;
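
The helper's return value changed meaning in this cycle: bpf_fib_lookup() now returns a status code, where 0 (BPF_FIB_LKUP_RET_SUCCESS) means the dmac/smac/ifindex output fields were filled in, and the egress device comes from fib_params.ifindex rather than the return value. A small sketch of that convention; the enum names and values are mirrored from my reading of include/uapi/linux/bpf.h and should be treated as assumptions if your tree differs:

#include <stdio.h>

/* Mirrors a subset of enum bpf_fib_lookup_ret; only SUCCESS == 0 makes
 * the fib_params output fields (dmac, smac, ifindex) usable.
 */
enum {
	FIB_LKUP_RET_SUCCESS   = 0,	/* forward the packet */
	FIB_LKUP_RET_NOT_FWDED = 4,	/* e.g. destined to the local host */
};

static const char *decide(int rc)
{
	return rc == FIB_LKUP_RET_SUCCESS
		? "rewrite MACs, redirect to fib_params.ifindex"
		: "XDP_PASS (let the normal stack handle it)";
}

int main(void)
{
	printf("%s\n", decide(FIB_LKUP_RET_SUCCESS));
	printf("%s\n", decide(FIB_LKUP_RET_NOT_FWDED));
	return 0;
}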
index 208eb2825dab017a9d3fdc0bdb8beef053b5626d..6efcead3198989d2ab2ab6772c72d8bb61c89c4e 100755 (executable)
@@ -1,7 +1,7 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
-cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1
 #include <stdio.h>
 int main(void)
 {
index 05f42a46d6ed35482c96ab9aed74915dd36fc284..959aa53ab6789f839442326359701b17ba9e337c 100644 (file)
@@ -694,15 +694,19 @@ static int do_load(int argc, char **argv)
                return -1;
        }
 
-       if (do_pin_fd(prog_fd, argv[1])) {
-               p_err("failed to pin program");
-               return -1;
-       }
+       if (do_pin_fd(prog_fd, argv[1]))
+               goto err_close_obj;
 
        if (json_output)
                jsonw_null(json_wtr);
 
+       bpf_object__close(obj);
+
        return 0;
+
+err_close_obj:
+       bpf_object__close(obj);
+       return -1;
 }
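
The fix plugs a leak: on a pin failure the opened object was previously abandoned, and on success it was never closed either. Both paths now release it through the usual goto-unwind idiom. A small self-contained illustration of that idiom, with malloc()/free() standing in for bpf_object__open()/bpf_object__close():

#include <stdio.h>
#include <stdlib.h>

/* Every exit path releases the object exactly once, mirroring the
 * structure do_load now has.
 */
static int load_and_pin(int fail_pin)
{
	char *obj = malloc(16);		/* stands in for bpf_object__open() */

	if (!obj)
		return -1;
	if (fail_pin)
		goto err_close_obj;	/* pin failed: unwind and report */

	free(obj);			/* success path also releases it */
	return 0;

err_close_obj:
	free(obj);
	return -1;
}

int main(void)
{
	printf("%d %d\n", load_and_pin(0), load_and_pin(1));
	return 0;
}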
 
 static int do_help(int argc, char **argv)
index 7eb613ffef55a827850aaa091f94033212f15da8..b4994a94968bfd9d12965fd630cba7e99458a30a 100644 (file)
@@ -6,6 +6,7 @@ CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
 CONFIG_NETDEVSIM=m
 CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCHED=y
 CONFIG_NET_SCH_INGRESS=y
 CONFIG_NET_IPIP=y
 CONFIG_IPV6=y
index 35669ccd4d23b26c7505e8829bcf3876e3bcb3e1..9df0d2ac45f8453b9529c4ea90fb19dba3f86480 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/sh
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ "$(id -u)" != "0" ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 SRC_TREE=../../../../
 
 test_run()
index ce2e15e4f9760e205ed8e91ab5260a23172ab2e5..677686198df34d799e67c0eb15ab25f4b68eba4c 100755 (executable)
@@ -1,6 +1,15 @@
 #!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 GREEN='\033[0;92m'
 RED='\033[0;31m'
 NC='\033[0m' # No Color
index 1c77994b5e713dfe8aae357dd083c4713080f62a..270fa8f49573207bc973cce2302b53341a6fec5b 100755 (executable)
 # A UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this
 # datagram can be read on NS6 when binding to fb00::6.
 
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+       echo $msg please run this as root >&2
+       exit $ksft_skip
+fi
+
 TMP_FILE="/tmp/selftest_lwt_seg6local.txt"
 
 cleanup()
index 05c8cb71724ae8c1d8d7c3e3453bce9a83092b96..9e78df207919366fbdd048f7645568ac701ad5f5 100644 (file)
@@ -1413,18 +1413,12 @@ out:
 
 int main(int argc, char **argv)
 {
-       struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
        int iov_count = 1, length = 1024, rate = 1;
        struct sockmap_options options = {0};
        int opt, longindex, err, cg_fd = 0;
        char *bpf_file = BPF_SOCKMAP_FILENAME;
        int test = PING_PONG;
 
-       if (setrlimit(RLIMIT_MEMLOCK, &r)) {
-               perror("setrlimit(RLIMIT_MEMLOCK)");
-               return 1;
-       }
-
        if (argc < 2)
                return test_suite();
 
old mode 100644 (file)
new mode 100755 (executable)