Merge tag 'mac80211-for-davem-2017-01-06' of git://git.kernel.org/pub/scm/linux/kerne...
author David S. Miller <davem@davemloft.net>
Fri, 6 Jan 2017 21:26:19 +0000 (16:26 -0500)
committer David S. Miller <davem@davemloft.net>
Fri, 6 Jan 2017 21:26:19 +0000 (16:26 -0500)
Johannes Berg says:

====================
Another single fix, to correctly handle destruction of a
single netlink socket having ownership of multiple objects
(scheduled scan requests and interfaces).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
82 files changed:
Documentation/DocBook/Makefile
Documentation/block/queue-sysfs.txt
Documentation/networking/mpls-sysctl.txt
Documentation/unaligned-memory-access.txt
MAINTAINERS
Makefile
arch/openrisc/kernel/vmlinux.lds.S
arch/parisc/include/asm/thread_info.h
arch/parisc/kernel/time.c
arch/parisc/mm/fault.c
arch/s390/include/asm/asm-prototypes.h [new file with mode: 0644]
arch/s390/kernel/vtime.c
arch/x86/include/asm/bitops.h
block/blk-wbt.c
crypto/testmgr.c
drivers/crypto/marvell/cesa.h
drivers/crypto/marvell/hash.c
drivers/crypto/marvell/tdma.c
drivers/hid/hid-asus.c
drivers/hid/hid-ids.h
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sony.c
drivers/hid/usbhid/hid-quirks.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/cadence/macb_pci.c
drivers/net/ethernet/cavium/Kconfig
drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
drivers/net/phy/dp83867.c
drivers/net/usb/asix_devices.c
drivers/net/vrf.c
drivers/nvme/host/core.c
drivers/nvme/host/fc.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/scsi.c
drivers/nvme/target/admin-cmd.c
drivers/nvme/target/fcloop.c
drivers/video/fbdev/cobalt_lcdfb.c
fs/block_dev.c
fs/buffer.c
fs/crypto/keyinfo.c
fs/crypto/policy.c
fs/dax.c
fs/ext2/inode.c
fs/ext4/file.c
fs/xfs/libxfs/xfs_ag_resv.c
fs/xfs/libxfs/xfs_refcount_btree.c
fs/xfs/libxfs/xfs_refcount_btree.h
fs/xfs/libxfs/xfs_rmap_btree.c
fs/xfs/libxfs/xfs_rmap_btree.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_icache.c
fs/xfs/xfs_refcount_item.c
fs/xfs/xfs_sysfs.c
include/linux/dax.h
include/linux/genhd.h
include/linux/page-flags.h
mm/filemap.c
mm/truncate.c
net/bridge/br_netfilter_hooks.c
net/core/drop_monitor.c
net/core/flow_dissector.c
net/ipv4/fib_frontend.c
net/ipv4/igmp.c
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv6/ip6_vti.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_payload.c
net/netfilter/nft_queue.c
net/netfilter/nft_quota.c
scripts/gcc-plugins/gcc-common.h
scripts/gcc-plugins/latent_entropy_plugin.c

diff --git a/Documentation/DocBook/Makefile b/Documentation/DocBook/Makefile
index c75e5d6b8fa8d48b787eed2a0f926bc36fe0a930..a6eb7dcd4dd5c010fe76ac285769d9e0c5157adc 100644
--- a/Documentation/DocBook/Makefile
+++ b/Documentation/DocBook/Makefile
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml  \
            kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
            gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
            genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-           80211.xml sh.xml regulator.xml w1.xml \
+           sh.xml regulator.xml w1.xml \
            writing_musb_glue_layer.xml iio.xml
 
 ifeq ($(DOCBOOKS),)
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index 51642159aedbbc405d1bb90fa89402c2143f8310..c0a3bb5a6e4eb291d077f10633001c439563ccc2 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.
 
 io_poll (RW)
 ------------
-When read, this file shows the total number of block IO polls and how
-many returned success.  Writing '0' to this file will disable polling
-for this device.  Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0).  Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.
 
 io_poll_delay (RW)
 ------------------
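The io_poll semantics described above are easy to poke at from user space (root required). A minimal C sketch, assuming a hypothetical NVMe queue directory — adjust the path for your device:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path -- substitute your own block device. */
	const char *path = "/sys/block/nvme0n1/queue/io_poll";
	char buf[8] = { 0 };
	int fd = open(path, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Post-patch, a read reports 1 (enabled) or 0 (disabled). */
	if (read(fd, buf, sizeof(buf) - 1) > 0)
		printf("io_poll = %s", buf);
	/* Writing '0' disables polling; any non-zero value enables it. */
	if (pwrite(fd, "1", 1, 0) < 0)
		perror("write");
	close(fd);
	return 0;
}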
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 9ed15f86c17c86ffa69fa3527e932b62f4b9ee20..15d8d16934fd13727bb35e9c5078484127bbfa30 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -5,8 +5,8 @@ platform_labels - INTEGER
        possible to configure forwarding for label values equal to or
        greater than the number of platform labels.
 
-       A dense utliziation of the entries in the platform label table
-       is possible and expected aas the platform labels are locally
+       A dense utilization of the entries in the platform label table
+       is possible and expected as the platform labels are locally
        allocated.
 
        If the number of platform label table entries is set to 0 no
diff --git a/Documentation/unaligned-memory-access.txt b/Documentation/unaligned-memory-access.txt
index a445da098bc6e5aa733cd55ca2ee8b4a5f04dc2c..3f76c0c379206a72519e864fb5486abf1a75ac0f 100644
--- a/Documentation/unaligned-memory-access.txt
+++ b/Documentation/unaligned-memory-access.txt
@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 #else
        const u16 *a = (const u16 *)addr1;
        const u16 *b = (const u16 *)addr2;
-       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+       return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
 #endif
 }
 
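On the ether_addr_equal() fix above: the three 16-bit XOR results are OR'd together, so the OR is zero exactly when all six bytes match — the comparison had to be `== 0`, not `!= 0`. A standalone user-space check of the same expression (this mirrors the document's branch for platforms with efficient unaligned access, hence the byte-to-u16 casts):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mac_equal(const uint8_t *addr1, const uint8_t *addr2)
{
	/* Three 16-bit XORs OR'd together are zero iff all bytes match. */
	const uint16_t *a = (const uint16_t *)addr1;
	const uint16_t *b = (const uint16_t *)addr2;

	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
}

int main(void)
{
	uint8_t x[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t y[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x56 };

	printf("%d %d\n", mac_equal(x, x), mac_equal(x, y)); /* prints: 1 0 */
	return 0;
}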
diff --git a/MAINTAINERS b/MAINTAINERS
index cfff2c9e3d9470550fd47dcd7b2638c77121c607..ea11bb03f550eb41e6de2e1b5cc9f8087a31b3bd 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5080,9 +5080,11 @@ F:       drivers/net/wan/dlci.c
 F:     drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
+M:     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:     linux-fbdev@vger.kernel.org
+T:     git git://github.com/bzolnier/linux.git
 Q:     http://patchwork.kernel.org/project/linux-fbdev/list/
-S:     Orphan
+S:     Maintained
 F:     Documentation/fb/
 F:     drivers/video/
 F:     include/video/
@@ -8852,17 +8854,22 @@ F:      drivers/video/fbdev/nvidia/
 NVM EXPRESS DRIVER
 M:     Keith Busch <keith.busch@intel.com>
 M:     Jens Axboe <axboe@fb.com>
+M:     Christoph Hellwig <hch@lst.de>
+M:     Sagi Grimberg <sagi@grimberg.me>
 L:     linux-nvme@lists.infradead.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W:     https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T:     git://git.infradead.org/nvme.git
+W:     http://git.infradead.org/nvme.git
 S:     Supported
 F:     drivers/nvme/host/
 F:     include/linux/nvme.h
+F:     include/uapi/linux/nvme_ioctl.h
 
 NVM EXPRESS TARGET DRIVER
 M:     Christoph Hellwig <hch@lst.de>
 M:     Sagi Grimberg <sagi@grimberg.me>
 L:     linux-nvme@lists.infradead.org
+T:     git://git.infradead.org/nvme.git
+W:     http://git.infradead.org/nvme.git
 S:     Supported
 F:     drivers/nvme/target/
 
@@ -13527,11 +13534,11 @@ F:    arch/x86/xen/*swiotlb*
 F:     drivers/xen/*swiotlb*
 
 XFS FILESYSTEM
-M:     Dave Chinner <david@fromorbit.com>
+M:     Darrick J. Wong <darrick.wong@oracle.com>
 M:     linux-xfs@vger.kernel.org
 L:     linux-xfs@vger.kernel.org
 W:     http://xfs.org/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
+T:     git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 S:     Supported
 F:     Documentation/filesystems/xfs.txt
 F:     fs/xfs/
diff --git a/Makefile b/Makefile
index ec411ba9e40f98376bb594de459e98413f0dfae9..5470d599384a5ba676a60490e19baf81a1068b65 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Roaring Lionus
 
 # *DOCUMENTATION*
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index ef31fc24344e983c67b6f2b9c185a9275d456ed1..552544616b9d93ab6838a998ffb0b6ad9a7f1574 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -44,6 +44,8 @@ SECTIONS
         /* Read-only sections, merged into text segment: */
         . = LOAD_BASE ;
 
+       _text = .;
+
        /* _s_kernel_ro must be page aligned */
        . = ALIGN(PAGE_SIZE);
        _s_kernel_ro = .;
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 7581330ea35be1e15498cf5cef9bbcbd3889aab9..88fe0aad4390b10830ce1bc1be62925d4b2d4bbc 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -49,7 +49,6 @@ struct thread_info {
 #define TIF_POLLING_NRFLAG     3       /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT               4       /* 32 bit binary */
 #define TIF_MEMDIE             5       /* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK    6       /* restore saved signal mask */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_NOTIFY_RESUME      8       /* callback before returning to user */
 #define TIF_SINGLESTEP         9       /* single stepping? */
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index da0d9cb63403d4b3b4f9647cf8415dd11db30fd3..1e22f981cd81fb0cf840407210d499cab0319e0e 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -235,9 +235,26 @@ void __init time_init(void)
 
        cr16_hz = 100 * PAGE0->mem_10msec;  /* Hz */
 
-       /* register at clocksource framework */
-       clocksource_register_hz(&clocksource_cr16, cr16_hz);
-
        /* register as sched_clock source */
        sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
 }
+
+static int __init init_cr16_clocksource(void)
+{
+       /*
+        * The cr16 interval timers are not synchronized across CPUs, so mark
+        * them unstable and lower rating on SMP systems.
+        */
+       if (num_online_cpus() > 1) {
+               clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+               clocksource_cr16.rating = 0;
+       }
+
+       /* register at clocksource framework */
+       clocksource_register_hz(&clocksource_cr16,
+               100 * PAGE0->mem_10msec);
+
+       return 0;
+}
+
+device_initcall(init_cr16_clocksource);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 8ff9253930af776b5ca7d408f4bd134cdb88c9b0..1a0b4f63f0e90fbb4e4cb58ea6da95d326bcba3c 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -234,7 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
            tsk->comm, code, address);
        print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);
 
-       pr_cont(" trap #%lu: %s%c", code, trap_name(code),
+       pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
                vma ? ',':'\n');
 
        if (vma)
diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
new file mode 100644
index 0000000..2c3413b
--- /dev/null
+++ b/arch/s390/include/asm/asm-prototypes.h
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 6b246aadf311706849341dac2d0eafee293340f1..1b5c5ee9fc1b60878844cd67cb6a6cb12800a563 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -94,7 +94,7 @@ static void update_mt_scaling(void)
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk)
 {
        u64 timer, clock, user, system, steal;
        u64 user_scaled, system_scaled;
@@ -138,7 +138,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
        }
        account_user_time(tsk, user);
        tsk->utimescaled += user_scaled;
-       account_system_time(tsk, hardirq_offset, system);
+       account_system_time(tsk, 0, system);
        tsk->stimescaled += system_scaled;
 
        steal = S390_lowcore.steal_timer;
@@ -152,7 +152,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 
 void vtime_task_switch(struct task_struct *prev)
 {
-       do_account_vtime(prev, 0);
+       do_account_vtime(prev);
        prev->thread.user_timer = S390_lowcore.user_timer;
        prev->thread.system_timer = S390_lowcore.system_timer;
        S390_lowcore.user_timer = current->thread.user_timer;
@@ -166,7 +166,7 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_account_user(struct task_struct *tsk)
 {
-       if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+       if (do_account_vtime(tsk))
                virt_timer_expire();
 }
 
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 68557f52b9619ddfed7681fc43c2d0530c625f0f..854022772c5be4d49d2697bd2b66b454f49c9e6f 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
        asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }
 
+static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+       bool negative;
+       asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
+               CC_SET(s)
+               : CC_OUT(s) (negative), ADDR
+               : "ir" ((char) ~(1 << nr)) : "memory");
+       return negative;
+}
+
+// Let everybody know we have it
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+
 /*
  * __clear_bit_unlock - Clears a bit in memory
  * @nr: Bit to clear
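On the clear_bit_unlock_is_negative_byte() addition above: the single atomic `andb` both clears bit @nr and, via the sign flag, reports whether bit 7 of the same byte is set afterwards — one atomic operation where a clear plus a separate test would otherwise be needed. Given that mm/filemap.c is in this same batch, this is presumably for the page-unlock fast path (clear PG_locked, learn whether PG_waiters needs a wakeup), though the hunk itself doesn't show the caller. A portable user-space sketch of the same contract, assuming GCC/Clang __atomic builtins and nr != 7:

#include <stdbool.h>

/* Clear bit @nr in *byte with release semantics and report whether
 * bit 7 of that byte is set afterwards.  Because nr != 7, bit 7 of
 * the fetched old value equals bit 7 of the stored result. */
static bool clear_bit_is_negative_byte(unsigned int nr, unsigned char *byte)
{
	unsigned char old = __atomic_fetch_and(byte,
			(unsigned char)~(1u << nr), __ATOMIC_RELEASE);

	return (old & 0x80) != 0;
}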
diff --git a/block/blk-wbt.c b/block/blk-wbt.c
index 6e82769f4042c2f57af7976b1b7b4342eea1306d..f0a9c07b4c7a5ef9e96985a89c5c000d62a78cd0 100644
--- a/block/blk-wbt.c
+++ b/block/blk-wbt.c
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
  * the timer to kick off queuing again.
  */
 static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+       __releases(lock)
+       __acquires(lock)
 {
        struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
        DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
                if (may_queue(rwb, rqw, &wait, rw))
                        break;
 
-               if (lock)
+               if (lock) {
                        spin_unlock_irq(lock);
-
-               io_schedule();
-
-               if (lock)
+                       io_schedule();
                        spin_lock_irq(lock);
+               } else
+                       io_schedule();
        } while (1);
 
        finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
  * in an irq held spinlock, if it holds one when calling this function.
  * If we do sleep, we'll release and re-grab it.
  */
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 {
        unsigned int ret = 0;
 
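The two __wbt_wait() changes travel together: the loop now drops the queue lock only around the io_schedule() sleep, and the new __releases(lock)/__acquires(lock) annotations tell sparse that the function may temporarily exit the critical section even though it returns with the lock held. An illustrative kernel-style fragment of that shape (not the actual driver code):

#include <linux/sched.h>
#include <linux/spinlock.h>

/* Sleep with @lock temporarily dropped; the annotations let sparse
 * track the unlock/relock instead of flagging an imbalance. */
static void sleep_dropping_lock(spinlock_t *lock)
	__releases(lock)
	__acquires(lock)
{
	spin_unlock_irq(lock);
	io_schedule();		/* never sleep while holding a spinlock */
	spin_lock_irq(lock);
}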
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f616ad74cce756fb2d0a0657d153483ed05f56d7..44e888b0b041944b44e8aa5d75619e51d7fe52a2 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
        for (i = 0; i < ctcount; i++) {
                unsigned int dlen = COMP_BUF_SIZE;
                int ilen = ctemplate[i].inlen;
+               void *input_vec;
 
+               input_vec = kmalloc(ilen, GFP_KERNEL);
+               if (!input_vec) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+
+               memcpy(input_vec, ctemplate[i].input, ilen);
                memset(output, 0, dlen);
                init_completion(&result.completion);
-               sg_init_one(&src, ctemplate[i].input, ilen);
+               sg_init_one(&src, input_vec, ilen);
                sg_init_one(&dst, output, dlen);
 
                req = acomp_request_alloc(tfm);
                if (!req) {
                        pr_err("alg: acomp: request alloc failed for %s\n",
                               algo);
+                       kfree(input_vec);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                if (ret) {
                        pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
                               i + 1, algo, -ret);
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                        pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
                               i + 1, algo, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                               i + 1, algo);
                        hexdump(output, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
 
+               kfree(input_vec);
                acomp_request_free(req);
        }
 
        for (i = 0; i < dtcount; i++) {
                unsigned int dlen = COMP_BUF_SIZE;
                int ilen = dtemplate[i].inlen;
+               void *input_vec;
+
+               input_vec = kmalloc(ilen, GFP_KERNEL);
+               if (!input_vec) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
 
+               memcpy(input_vec, dtemplate[i].input, ilen);
                memset(output, 0, dlen);
                init_completion(&result.completion);
-               sg_init_one(&src, dtemplate[i].input, ilen);
+               sg_init_one(&src, input_vec, ilen);
                sg_init_one(&dst, output, dlen);
 
                req = acomp_request_alloc(tfm);
                if (!req) {
                        pr_err("alg: acomp: request alloc failed for %s\n",
                               algo);
+                       kfree(input_vec);
                        ret = -ENOMEM;
                        goto out;
                }
@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                if (ret) {
                        pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
                               i + 1, algo, -ret);
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                        pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
                               i + 1, algo, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
                               i + 1, algo);
                        hexdump(output, req->dlen);
                        ret = -EINVAL;
+                       kfree(input_vec);
                        acomp_request_free(req);
                        goto out;
                }
 
+               kfree(input_vec);
                acomp_request_free(req);
        }
 
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index a768da7138a1cd4a0a79771ad99b2f5436f481c3..b7872f62f67475fdc6889420c38de4f8418ffafd 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
 #define CESA_TDMA_SRC_IN_SRAM                  BIT(30)
 #define CESA_TDMA_END_OF_REQ                   BIT(29)
 #define CESA_TDMA_BREAK_CHAIN                  BIT(28)
-#define CESA_TDMA_TYPE_MSK                     GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE                    BIT(27)
+#define CESA_TDMA_TYPE_MSK                     GENMASK(26, 0)
 #define CESA_TDMA_DUMMY                                0
 #define CESA_TDMA_DATA                         1
 #define CESA_TDMA_OP                           2
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index 317cf029c0cf1beab3f2b5f0ab41cf187626e24b..77c0fb936f4794363f478b7bc0b8d23dd10d880f 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
        sreq->offset = 0;
 }
 
+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+       struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+       struct mv_cesa_req *base = &creq->base;
+
+       /* We must explicitly set the digest state. */
+       if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+               struct mv_cesa_engine *engine = base->engine;
+               int i;
+
+               /* Set the hash state in the IVDIG regs. */
+               for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+                       writel_relaxed(creq->state[i], engine->regs +
+                                      CESA_IVDIG(i));
+       }
+
+       mv_cesa_dma_step(base);
+}
+
 static void mv_cesa_ahash_step(struct crypto_async_request *req)
 {
        struct ahash_request *ahashreq = ahash_request_cast(req);
        struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
 
        if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-               mv_cesa_dma_step(&creq->base);
+               mv_cesa_ahash_dma_step(ahashreq);
        else
                mv_cesa_ahash_std_step(ahashreq);
 }
@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
        struct mv_cesa_ahash_dma_iter iter;
        struct mv_cesa_op_ctx *op = NULL;
        unsigned int frag_len;
+       bool set_state = false;
        int ret;
        u32 type;
 
        basereq->chain.first = NULL;
        basereq->chain.last = NULL;
 
+       if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+               set_state = true;
+
        if (creq->src_nents) {
                ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
                                 DMA_TO_DEVICE);
@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
        if (type != CESA_TDMA_RESULT)
                basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;
 
+       if (set_state) {
+               /*
+                * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+                * let the step logic know that the IVDIG registers should be
+                * explicitly set before launching a TDMA chain.
+                */
+               basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+       }
+
        return 0;
 
 err_free_tdma:
diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
index 4416b88eca708aff6aadbff76ec04d95f6b40dbb..c76375ff376d39e5dc2e74463a310231b58f4c86 100644
--- a/drivers/crypto/marvell/tdma.c
+++ b/drivers/crypto/marvell/tdma.c
@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
                last->next = dreq->chain.first;
                engine->chain.last = dreq->chain.last;
 
-               if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+               /*
+                * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+                * the last element of the current chain, or if the request
+                * being queued needs the IV regs to be set before launching
+                * the request.
+                */
+               if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+                   !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
                        last->next_dma = dreq->chain.first->cur_dma;
        }
 }
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index d40ed9fdf68d990ce2b138579bc6404a6fae7d99..70b12f89a193dc369273cea5565bf804a78cc50a 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -64,7 +64,8 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_SKIP_INPUT_MAPPING       BIT(2)
 #define QUIRK_IS_MULTITOUCH            BIT(3)
 
-#define NOTEBOOK_QUIRKS                        QUIRK_FIX_NOTEBOOK_REPORT
+#define KEYBOARD_QUIRKS                        (QUIRK_FIX_NOTEBOOK_REPORT | \
+                                                QUIRK_NO_INIT_REPORTS)
 #define TOUCHPAD_QUIRKS                        (QUIRK_NO_INIT_REPORTS | \
                                                 QUIRK_SKIP_INPUT_MAPPING | \
                                                 QUIRK_IS_MULTITOUCH)
@@ -170,11 +171,11 @@ static int asus_raw_event(struct hid_device *hdev,
 
 static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
+       struct input_dev *input = hi->input;
        struct asus_drvdata *drvdata = hid_get_drvdata(hdev);
 
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
                int ret;
-               struct input_dev *input = hi->input;
 
                input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
                input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
@@ -191,10 +192,10 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
                        hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
                        return ret;
                }
-
-               drvdata->input = input;
        }
 
+       drvdata->input = input;
+
        return 0;
 }
 
@@ -286,7 +287,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
                goto err_stop_hw;
        }
 
-       drvdata->input->name = "Asus TouchPad";
+       if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+               drvdata->input->name = "Asus TouchPad";
+       } else {
+               drvdata->input->name = "Asus Keyboard";
+       }
 
        if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
                ret = asus_start_multitouch(hdev);
@@ -315,7 +320,7 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,
 
 static const struct hid_device_id asus_devices[] = {
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
-                USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+                USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), KEYBOARD_QUIRKS},
        { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
                         USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
        { }
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ec277b96eaa1b33461aa7702f38864598b910e59..54bd22dc14110c308744f28f01a7ab4cff79ff95 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
 #define USB_VENDOR_ID_DRAGONRISE               0x0079
 #define USB_DEVICE_ID_DRAGONRISE_WIIU          0x1800
 #define USB_DEVICE_ID_DRAGONRISE_PS3           0x1801
+#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR    0x1803
 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE      0x1843
 
 #define USB_VENDOR_ID_DWAV             0x0eef
 #define USB_VENDOR_ID_FLATFROG         0x25b5
 #define USB_DEVICE_ID_MULTITOUCH_3200  0x0002
 
+#define USB_VENDOR_ID_FUTABA            0x0547
+#define USB_DEVICE_ID_LED_DISPLAY       0x7000
+
 #define USB_VENDOR_ID_ESSENTIAL_REALITY        0x0d7f
 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
 
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 5c925228847c8e88653ec5fb00edfc053fa81784..4ef73374a8f9881136cabeda32a67c6a21d53a85 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -212,7 +212,6 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        __s32 value;
        int ret = 0;
 
-       memset(buffer, 0, buffer_size);
        mutex_lock(&data->mutex);
        report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
        if (!report || (field_index >= report->maxfield)) {
@@ -256,6 +255,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
        int buffer_index = 0;
        int i;
 
+       memset(buffer, 0, buffer_size);
+
        mutex_lock(&data->mutex);
        report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
        if (!report || (field_index >= report->maxfield) ||
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 7687c0875395d6b351928a156be6f5268cf50cfe..f405b07d03816506215bd19fe3c878393370484a 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -1099,8 +1099,11 @@ struct sony_sc {
        u8 led_delay_on[MAX_LEDS];
        u8 led_delay_off[MAX_LEDS];
        u8 led_count;
+       bool ds4_dongle_connected;
 };
 
+static void sony_set_leds(struct sony_sc *sc);
+
 static inline void sony_schedule_work(struct sony_sc *sc)
 {
        if (!sc->defer_initialization)
@@ -1430,6 +1433,31 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
                                return -EILSEQ;
                        }
                }
+
+               /*
+                * In the case of a DS4 USB dongle, bit[2] of byte 31 indicates
+                * if a DS4 is actually connected (indicated by '0').
+                * For non-dongle, this bit is always 0 (connected).
+                */
+               if (sc->hdev->vendor == USB_VENDOR_ID_SONY &&
+                   sc->hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) {
+                       bool connected = (rd[31] & 0x04) ? false : true;
+
+                       if (!sc->ds4_dongle_connected && connected) {
+                               hid_info(sc->hdev, "DualShock 4 USB dongle: controller connected\n");
+                               sony_set_leds(sc);
+                               sc->ds4_dongle_connected = true;
+                       } else if (sc->ds4_dongle_connected && !connected) {
+                               hid_info(sc->hdev, "DualShock 4 USB dongle: controller disconnected\n");
+                               sc->ds4_dongle_connected = false;
+                               /* Return 0, so hidraw can get the report. */
+                               return 0;
+                       } else if (!sc->ds4_dongle_connected) {
+                               /* Return 0, so hidraw can get the report. */
+                               return 0;
+                       }
+               }
+
                dualshock4_parse_report(sc, rd, size);
        }
 
@@ -2390,6 +2418,12 @@ static int sony_check_add(struct sony_sc *sc)
                }
 
                memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
+
+               snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
+                       "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+                       sc->mac_address[5], sc->mac_address[4],
+                       sc->mac_address[3], sc->mac_address[2],
+                       sc->mac_address[1], sc->mac_address[0]);
        } else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
                        (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
                buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2548,7 +2582,7 @@ static int sony_input_configured(struct hid_device *hdev,
                        hid_err(sc->hdev,
                        "Unable to initialize multi-touch slots: %d\n",
                        ret);
-                       return ret;
+                       goto err_stop;
                }
 
                sony_init_output_report(sc, dualshock4_send_output_report);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index b3e01c82af0512dce7a68e6bde908d7e3afeaba8..e9d6cc7cdfc5c8019422d45914dc0363448bcb12 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -83,11 +83,13 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
        { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 155190db682d29a6a97b2267550954fb4eba639d..9943629fcbf9ae14a9683e0b2eb0da459f83c0c6 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -539,6 +539,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
                }
        }
 
+isr_done:
        /* If there is not a separate AN irq, handle it here */
        if (pdata->dev_irq == pdata->an_irq)
                pdata->phy_if.an_isr(irq, pdata);
@@ -551,7 +552,6 @@ static irqreturn_t xgbe_isr(int irq, void *data)
        if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
                pdata->i2c_if.i2c_isr(irq, pdata);
 
-isr_done:
        return IRQ_HANDLED;
 }
 
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 25d1eb4933d0b8b28dc53d07344eb6a47a263893..7e8cf213fd813d8530f65c8439a77fd16ffeff9b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1012,15 +1012,6 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                goto out;
        }
 
-       /* Insert TSB and checksum infos */
-       if (priv->tsb_en) {
-               skb = bcm_sysport_insert_tsb(skb, dev);
-               if (!skb) {
-                       ret = NETDEV_TX_OK;
-                       goto out;
-               }
-       }
-
        /* The Ethernet switch we are interfaced with needs packets to be at
         * least 64 bytes (including FCS) otherwise they will be discarded when
         * they enter the switch port logic. When Broadcom tags are enabled, we
@@ -1028,13 +1019,21 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
         * (including FCS and tag) because the length verification is done after
         * the Broadcom tag is stripped off the ingress packet.
         */
-       if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+       if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
                ret = NETDEV_TX_OK;
                goto out;
        }
 
-       skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
-                       ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+       /* Insert TSB and checksum infos */
+       if (priv->tsb_en) {
+               skb = bcm_sysport_insert_tsb(skb, dev);
+               if (!skb) {
+                       ret = NETDEV_TX_OK;
+                       goto out;
+               }
+       }
+
+       skb_len = skb->len;
 
        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
diff --git a/drivers/net/ethernet/cadence/macb_pci.c b/drivers/net/ethernet/cadence/macb_pci.c
index 92be2cd8f817caef709fce5de320663bd44c9dc8..9906fda76087c013da8f1b2ca73a3cf5709480d3 100644
--- a/drivers/net/ethernet/cadence/macb_pci.c
+++ b/drivers/net/ethernet/cadence/macb_pci.c
@@ -1,5 +1,5 @@
 /**
- * macb_pci.c - Cadence GEM PCI wrapper.
+ * Cadence GEM PCI wrapper.
  *
  * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
  *
@@ -45,32 +45,27 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct macb_platform_data plat_data;
        struct resource res[2];
 
-       /* sanity check */
-       if (!id)
-               return -EINVAL;
-
        /* enable pci device */
-       err = pci_enable_device(pdev);
+       err = pcim_enable_device(pdev);
        if (err < 0) {
-               dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
-                       err);
-               return -EACCES;
+               dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
+               return err;
        }
 
        pci_set_master(pdev);
 
        /* set up resources */
        memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
-       res[0].start = pdev->resource[0].start;
-       res[0].end = pdev->resource[0].end;
+       res[0].start = pci_resource_start(pdev, 0);
+       res[0].end = pci_resource_end(pdev, 0);
        res[0].name = PCI_DRIVER_NAME;
        res[0].flags = IORESOURCE_MEM;
-       res[1].start = pdev->irq;
+       res[1].start = pci_irq_vector(pdev, 0);
        res[1].name = PCI_DRIVER_NAME;
        res[1].flags = IORESOURCE_IRQ;
 
-       dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
-                (void *)(uintptr_t)pci_resource_start(pdev, 0));
+       dev_info(&pdev->dev, "EMAC physical base addr: %pa\n",
+                &res[0].start);
 
        /* set up macb platform data */
        memset(&plat_data, 0, sizeof(plat_data));
@@ -100,7 +95,7 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        plat_info.num_res = ARRAY_SIZE(res);
        plat_info.data = &plat_data;
        plat_info.size_data = sizeof(plat_data);
-       plat_info.dma_mask = DMA_BIT_MASK(32);
+       plat_info.dma_mask = pdev->dma_mask;
 
        /* register platform device */
        plat_dev = platform_device_register_full(&plat_info);
@@ -120,7 +115,6 @@ err_hclk_register:
        clk_unregister(plat_data.pclk);
 
 err_pclk_register:
-       pci_disable_device(pdev);
        return err;
 }
 
@@ -130,7 +124,6 @@ static void macb_remove(struct pci_dev *pdev)
        struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);
 
        platform_device_unregister(plat_dev);
-       pci_disable_device(pdev);
        clk_unregister(plat_data->pclk);
        clk_unregister(plat_data->hclk);
 }
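The macb_pci conversion to pcim_enable_device() puts the enable under devres: the PCI core disables the device automatically when probe fails or the driver unbinds, which is why the explicit pci_disable_device() calls vanish from both the error path and macb_remove(). The managed shape, reduced to a sketch:

#include <linux/pci.h>

/* Managed probe: devres undoes the enable on failure or unbind,
 * so no pci_disable_device() appears anywhere in the driver. */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err < 0)
		return err;	/* nothing to unwind */

	pci_set_master(pdev);
	/* ... further setup; early returns stay clean ... */
	return 0;
}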
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index bbc8bd16cb971d6e2df3035d20ccd4151791a660..dcbce6cac63e2db7a936a7feea312b25ba3f2421 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -77,7 +77,7 @@ config OCTEON_MGMT_ETHERNET
 config LIQUIDIO_VF
        tristate "Cavium LiquidIO VF support"
        depends on 64BIT && PCI_MSI
-       select PTP_1588_CLOCK
+       imply PTP_1588_CLOCK
        ---help---
          This driver supports Cavium LiquidIO Intelligent Server Adapter
          based on CN23XX chips.
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index 0f0de5b63622d8ebfb98c124d5768e806a3689e8..d04a6c1634452034217e9dc4ab8771bf02899b86 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
                if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
                        fl6.flowi6_oif = sin6_scope_id;
                dst = ip6_route_output(&init_net, NULL, &fl6);
-               if (!dst)
-                       goto out;
-               if (!cxgb_our_interface(lldi, get_real_dev,
-                                       ip6_dst_idev(dst)->dev) &&
-                   !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+               if (dst->error ||
+                   (!cxgb_our_interface(lldi, get_real_dev,
+                                        ip6_dst_idev(dst)->dev) &&
+                    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
                        dst_release(dst);
-                       dst = NULL;
+                       return NULL;
                }
        }
 
-out:
        return dst;
 }
 EXPORT_SYMBOL(cxgb_find_route6);
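The point of the cxgb_find_route6() rework: ip6_route_output() never returns NULL. Failures come back as a valid dst with dst->error set, and the caller still holds a reference it must drop — so the old `if (!dst)` check could never fire. A small user-space model of consuming that style of API (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

/* A lookup that always returns an object and reports failure via an
 * embedded error field, like ip6_route_output(). */
struct route {
	int error;	/* 0 on success, negative errno on failure */
};

static struct route *lookup_route(int fail)
{
	struct route *rt = malloc(sizeof(*rt));

	if (rt)
		rt->error = fail ? -113 : 0;	/* -EHOSTUNREACH */
	return rt;
}

static int use_route(int fail)
{
	struct route *rt = lookup_route(fail);
	int err;

	if (!rt)
		return -12;	/* -ENOMEM: allocation, not routing */
	err = rt->error;	/* check the object, not the pointer */
	free(rt);		/* reference dropped on both paths */
	return err;
}

int main(void)
{
	printf("%d %d\n", use_route(0), use_route(1)); /* 0 -113 */
	return 0;
}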
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7e1633bf5a22ccf1c9c123ba541349b5dfea1064..225e9a4877d7b16e058cfa7c0dac9ccb5434bb5b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5155,7 +5155,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
            skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
            skb->inner_protocol != htons(ETH_P_TEB) ||
            skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-           sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+               sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
+           !adapter->vxlan_port ||
+           udp_hdr(skb)->dest != adapter->vxlan_port)
                return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
        return features;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 624ba9058dc46bd369f7be872b040aee1e814696..c9b7ad65e5633bc2a5147706f5c728f4d1cffa3c 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -733,6 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
        priv->cgr_data.cgr.cb = dpaa_eth_cgscn;
 
        /* Enable Congestion State Change Notifications and CS taildrop */
+       memset(&initcgr, 0, sizeof(initcgr));
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;
 
@@ -2291,7 +2292,8 @@ static int dpaa_open(struct net_device *net_dev)
        net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
        if (!net_dev->phydev) {
                netif_err(priv, ifup, net_dev, "init_phy() failed\n");
-               return -ENODEV;
+               err = -ENODEV;
+               goto phy_init_failed;
        }
 
        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -2314,6 +2316,7 @@ mac_start_failed:
        for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
                fman_port_disable(mac_dev->port[i]);
 
+phy_init_failed:
        dpaa_eth_napi_disable(priv);
 
        return err;
@@ -2420,6 +2423,7 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
        }
 
        /* Enable CS TD, but disable Congestion State Change Notifications. */
+       memset(&initcgr, 0, sizeof(initcgr));
        initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
        initcgr.cgr.cscn_en = QM_CGR_EN;
        cs_th = DPAA_INGRESS_CS_THRESHOLD;
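Both dpaa_eth memset() hunks fix the same class of bug: initcgr is a stack variable, so every field not explicitly assigned held garbage when the structure was handed to the hardware. Zeroing first makes the untouched fields deterministic. The two equivalent idioms side by side (struct shape invented for illustration):

#include <string.h>

struct initcgr {
	unsigned short we_mask;
	unsigned char cscn_en;
	/* ... further fields consumed by hardware ... */
};

void configure(void)
{
	/* The patch's idiom: memset, then fill in what's needed. */
	struct initcgr a;

	memset(&a, 0, sizeof(a));
	a.we_mask = 0x3;

	/* Equivalent: a designated initializer zeroes all other named
	 * members (strictly speaking, not padding bytes). */
	struct initcgr b = { .we_mask = 0x3 };

	(void)a;
	(void)b;
}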
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 44389c90056a0f197a97f5d36478ec597f266004..8f1623bf2134700498198a98cb6aca9dddd2a6cd 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -696,7 +696,7 @@ enum rtl_tx_desc_bit_1 {
 enum rtl_rx_desc_bit {
        /* Rx private */
        PID1            = (1 << 18), /* Protocol ID bit 1/2 */
-       PID0            = (1 << 17), /* Protocol ID bit 2/2 */
+       PID0            = (1 << 17), /* Protocol ID bit 0/2 */
 
 #define RxProtoUDP     (PID1)
 #define RxProtoTCP     (PID0)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 0af7fc279c8560d130642e65a5e87a702699ee34..f729a6b43958cc82a1b2d38293cb50baf767f39a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -574,6 +574,7 @@ static struct sh_eth_cpu_data r8a7740_data = {
        .rpadir_value   = 2 << 16,
        .no_trimd       = 1,
        .no_ade         = 1,
+       .hw_crc         = 1,
        .tsu            = 1,
        .select_mii     = 1,
        .shift_rd0      = 1,
@@ -802,7 +803,7 @@ static struct sh_eth_cpu_data sh7734_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -819,6 +820,7 @@ static struct sh_eth_cpu_data sh7734_data = {
        .tsu            = 1,
        .hw_crc         = 1,
        .select_mii     = 1,
+       .shift_rd0      = 1,
 };
 
 /* SH7763 */
@@ -831,7 +833,7 @@ static struct sh_eth_cpu_data sh7763_data = {
 
        .ecsr_value     = ECSR_ICD | ECSR_MPD,
        .ecsipr_value   = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
-       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+       .eesipr_value   = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
 
        .tx_check       = EESR_TC1 | EESR_FTC,
        .eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index de2947ccc5ad7d30d9cb7bc68483154cb9d119cd..5eb0e684fd76a3de1f46210f9a9b6118d98041e3 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1323,7 +1323,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
        }
 
        /* don't fail init if RSS setup doesn't work */
-       efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+       rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+       efx->rss_active = (rc == 0);
 
        return 0;
 }
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 87bdc56b4e3a636450e4f39e49b37606332d8268..18ebaea44e8257255c6a910958e4b00466600903 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -975,6 +975,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
 
        case ETHTOOL_GRXFH: {
                info->data = 0;
+               if (!efx->rss_active) /* No RSS */
+                       return 0;
                switch (info->flow_type) {
                case UDP_V4_FLOW:
                        if (efx->rx_hash_udp_4tuple)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 1a635ced62d0581634748d4b8bdcb15a2e0da69c..1c62c1a00fca49679cb8a69a1ac1a5564be9868d 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -860,6 +860,7 @@ struct vfdi_status;
  * @rx_hash_key: Toeplitz hash key for RSS
  * @rx_indir_table: Indirection table for RSS
  * @rx_scatter: Scatter mode enabled for receives
+ * @rss_active: RSS enabled on hardware
  * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
  * @int_error_count: Number of internal errors seen recently
  * @int_error_expire: Time at which error count will be expired
@@ -998,6 +999,7 @@ struct efx_nic {
        u8 rx_hash_key[40];
        u32 rx_indir_table[128];
        bool rx_scatter;
+       bool rss_active;
        bool rx_hash_udp_4tuple;
 
        unsigned int_error_count;
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index a3901bc96586e5eff4f58105032ea2565c083769..4e54e5dc9fcb49bf03667843d47a4a2cf1aa978b 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -403,6 +403,7 @@ static int siena_init_nic(struct efx_nic *efx)
        efx_writeo(efx, &temp, FR_AZ_RX_CFG);
 
        siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+       efx->rss_active = true;
 
        /* Enable event logging */
        rc = efx_mcdi_log_ctrl(efx, true, false, 0);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index c35597586121e5c3b41f6453711e3ea9920af463..3dc7d279f80513a948d2b855c5a09e16f63d5686 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -60,8 +60,9 @@ struct oxnas_dwmac {
        struct regmap   *regmap;
 };
 
-static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
 {
+       struct oxnas_dwmac *dwmac = priv;
        unsigned int value;
        int ret;
 
@@ -105,20 +106,20 @@ static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
        return 0;
 }
 
+static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+       struct oxnas_dwmac *dwmac = priv;
+
+       clk_disable_unprepare(dwmac->clk);
+}
+
 static int oxnas_dwmac_probe(struct platform_device *pdev)
 {
        struct plat_stmmacenet_data *plat_dat;
        struct stmmac_resources stmmac_res;
-       struct device_node *sysctrl;
        struct oxnas_dwmac *dwmac;
        int ret;
 
-       sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
-       if (!sysctrl) {
-               dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
-               return -EINVAL;
-       }
-
        ret = stmmac_get_platform_resources(pdev, &stmmac_res);
        if (ret)
                return ret;
@@ -128,72 +129,48 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
                return PTR_ERR(plat_dat);
 
        dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-       if (!dwmac)
-               return -ENOMEM;
+       if (!dwmac) {
+               ret = -ENOMEM;
+               goto err_remove_config_dt;
+       }
 
        dwmac->dev = &pdev->dev;
        plat_dat->bsp_priv = dwmac;
+       plat_dat->init = oxnas_dwmac_init;
+       plat_dat->exit = oxnas_dwmac_exit;
 
-       dwmac->regmap = syscon_node_to_regmap(sysctrl);
+       dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+                                                       "oxsemi,sys-ctrl");
        if (IS_ERR(dwmac->regmap)) {
                dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
-               return PTR_ERR(dwmac->regmap);
+               ret = PTR_ERR(dwmac->regmap);
+               goto err_remove_config_dt;
        }
 
        dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
-       if (IS_ERR(dwmac->clk))
-               return PTR_ERR(dwmac->clk);
+       if (IS_ERR(dwmac->clk)) {
+               ret = PTR_ERR(dwmac->clk);
+               goto err_remove_config_dt;
+       }
 
-       ret = oxnas_dwmac_init(dwmac);
+       ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
        if (ret)
-               return ret;
+               goto err_remove_config_dt;
 
        ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
        if (ret)
-               clk_disable_unprepare(dwmac->clk);
+               goto err_dwmac_exit;
 
-       return ret;
-}
 
-static int oxnas_dwmac_remove(struct platform_device *pdev)
-{
-       struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-       int ret = stmmac_dvr_remove(&pdev->dev);
-
-       clk_disable_unprepare(dwmac->clk);
-
-       return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int oxnas_dwmac_suspend(struct device *dev)
-{
-       struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-       int ret;
-
-       ret = stmmac_suspend(dev);
-       clk_disable_unprepare(dwmac->clk);
-
-       return ret;
-}
-
-static int oxnas_dwmac_resume(struct device *dev)
-{
-       struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-       int ret;
-
-       ret = oxnas_dwmac_init(dwmac);
-       if (ret)
-               return ret;
+       return 0;
 
-       ret = stmmac_resume(dev);
+err_dwmac_exit:
+       oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+       stmmac_remove_config_dt(pdev, plat_dat);
 
        return ret;
 }
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
-       oxnas_dwmac_suspend, oxnas_dwmac_resume);
 
 static const struct of_device_id oxnas_dwmac_match[] = {
        { .compatible = "oxsemi,ox820-dwmac" },
@@ -203,10 +180,10 @@ MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
 
 static struct platform_driver oxnas_dwmac_driver = {
        .probe  = oxnas_dwmac_probe,
-       .remove = oxnas_dwmac_remove,
+       .remove = stmmac_pltfr_remove,
        .driver = {
                .name           = "oxnas-dwmac",
-               .pm             = &oxnas_dwmac_pm_ops,
+               .pm             = &stmmac_pltfr_pm_ops,
                .of_match_table = oxnas_dwmac_match,
        },
 };
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 1b639242f9e23170e69f7b7669ba82d8d4263fb5..e84ae084e259c90649277532f64e563e1091bf28 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -29,6 +29,7 @@
 #define MII_DP83867_MICR       0x12
 #define MII_DP83867_ISR                0x13
 #define DP83867_CTRL           0x1f
+#define DP83867_CFG3           0x1e
 
 /* Extended Registers */
 #define DP83867_RGMIICTL       0x0032
@@ -98,6 +99,8 @@ static int dp83867_config_intr(struct phy_device *phydev)
                micr_status |=
                        (MII_DP83867_MICR_AN_ERR_INT_EN |
                        MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+                       MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+                       MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
                        MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
                        MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
 
@@ -214,6 +217,13 @@ static int dp83867_config_init(struct phy_device *phydev)
                }
        }
 
+       /* Enable Interrupt output INT_OE in CFG3 register */
+       if (phy_interrupt_is_valid(phydev)) {
+               val = phy_read(phydev, DP83867_CFG3);
+               val |= BIT(7);
+               phy_write(phydev, DP83867_CFG3, val);
+       }
+
        return 0;
 }
 
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 6c646e228833c07b0369c28684f0a6b4aa27cf2c..6e98ede997d3f08d4ac3fa967382b55e4dfc287d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1367,6 +1367,7 @@ static struct usb_driver asix_driver = {
        .probe =        usbnet_probe,
        .suspend =      asix_suspend,
        .resume =       asix_resume,
+       .reset_resume = asix_resume,
        .disconnect =   usbnet_disconnect,
        .supports_autosuspend = 1,
        .disable_hub_initiated_lpm = 1,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7532646c3b7bedb89c2cbe95e84f09e32e0a8391..23dfb0eac0981704f2770bc3abe9bb32a80e3fc9 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -967,6 +967,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
         */
        need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
        if (!ipv6_ndisc_frame(skb) && !need_strict) {
+               vrf_rx_stats(vrf_dev, skb->len);
                skb->dev = vrf_dev;
                skb->skb_iif = vrf_dev->ifindex;
 
@@ -1011,6 +1012,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
                goto out;
        }
 
+       vrf_rx_stats(vrf_dev, skb->len);
+
        skb_push(skb, skb->mac_len);
        dev_queue_xmit_nit(skb, vrf_dev);
        skb_pull(skb, skb->mac_len);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index b40cfb076f02446fda62aea907301e4a62dcc17a..2fc86dc7a8df3e487c8222fa84310e7832c9c0a8 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1193,8 +1193,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
                blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
                blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
        }
-       if (ctrl->stripe_size)
-               blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
+       if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
+               blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
        blk_queue_virt_boundary(q, ctrl->page_size - 1);
        if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
                vwc = true;
@@ -1250,19 +1250,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
        ctrl->max_hw_sectors =
                min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);
 
-       if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
-               unsigned int max_hw_sectors;
-
-               ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
-               max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
-               if (ctrl->max_hw_sectors) {
-                       ctrl->max_hw_sectors = min(max_hw_sectors,
-                                                       ctrl->max_hw_sectors);
-               } else {
-                       ctrl->max_hw_sectors = max_hw_sectors;
-               }
-       }
-
        nvme_set_queue_limits(ctrl, ctrl->admin_q);
        ctrl->sgls = le32_to_cpu(id->sgls);
        ctrl->kas = le16_to_cpu(id->kas);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 771e2e76187222dfb71616f5665c7b2b22802c74..aa0bc60810a74ff93cf05b294b2a9d4968ecf397 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1491,19 +1491,20 @@ static int
 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
 {
        struct nvme_fc_queue *queue = &ctrl->queues[1];
-       int i, j, ret;
+       int i, ret;
 
        for (i = 1; i < ctrl->queue_count; i++, queue++) {
                ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
-               if (ret) {
-                       for (j = i-1; j >= 0; j--)
-                               __nvme_fc_delete_hw_queue(ctrl,
-                                               &ctrl->queues[j], j);
-                       return ret;
-               }
+               if (ret)
+                       goto delete_queues;
        }
 
        return 0;
+
+delete_queues:
+       for (; i >= 0; i--)
+               __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
+       return ret;
 }
 
 static int
@@ -2401,8 +2402,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
        WARN_ON_ONCE(!changed);
 
        dev_info(ctrl->ctrl.device,
-               "NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
-               ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
+               "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
+               ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
 
        kref_get(&ctrl->ctrl.kref);
 
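The queue-creation rework in nvme_fc_create_hw_io_queues() above swaps a per-iteration unwind loop for one `delete_queues` label — the usual reverse-order cleanup idiom. A generic sketch of the idiom (create_one()/destroy_one() are hypothetical stand-ins):

static int create_one(int i) { return i == 3 ? -1 : 0; } /* stand-in */
static void destroy_one(int i) { (void)i; }              /* stand-in */

/* Tear down everything created before the failure, newest first. */
static int create_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = create_one(i);
		if (ret)
			goto undo;
	}
	return 0;

undo:
	while (--i >= 0)
		destroy_one(i);
	return ret;
}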
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bd5321441d127143b87563e5463d2944aa0c1b0e..6377e14586dc5c837749049cf3dafc7b210a3026 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -135,7 +135,6 @@ struct nvme_ctrl {
 
        u32 page_size;
        u32 max_hw_sectors;
-       u32 stripe_size;
        u16 oncs;
        u16 vid;
        atomic_t abort_limit;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 3d21a154dce79deceeff77cd16ef5c6bf2a71978..19beeb7b2ac26a5bf0f81bf4e8b995bf29dba195 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -712,15 +712,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
                req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
                nvme_req(req)->result = cqe.result;
                blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
-
        }
 
-       /* If the controller ignores the cq head doorbell and continuously
-        * writes to the queue, it is theoretically possible to wrap around
-        * the queue twice and mistakenly return IRQ_NONE.  Linux only
-        * requires that 0.1% of your interrupts are handled, so this isn't
-        * a big problem.
-        */
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return;
 
@@ -1909,10 +1902,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
        if (!dev->bar)
                goto release;
 
-       return 0;
+       return 0;
   release:
-       pci_release_mem_regions(pdev);
-       return -ENODEV;
+       pci_release_mem_regions(pdev);
+       return -ENODEV;
 }
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c
index b71e95044b43e3e62a5d063047064a46d88a6d9f..a5c09e703bd8636d96c9c0c7d603226e46518f61 100644
--- a/drivers/nvme/host/scsi.c
+++ b/drivers/nvme/host/scsi.c
@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
        return nvme_trans_status_code(hdr, nvme_sc);
 }
 
-static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
-                                                       u8 *cmd)
-{
-       u8 immed, no_flush;
-
-       immed = cmd[1] & 0x01;
-       no_flush = cmd[4] & 0x04;
-
-       if (immed != 0) {
-               return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
-                                       ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
-                                       SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
-       } else {
-               if (no_flush == 0) {
-                       /* Issue NVME FLUSH command prior to START STOP UNIT */
-                       int res = nvme_trans_synchronize_cache(ns, hdr);
-                       if (res)
-                               return res;
-               }
-
-               return 0;
-       }
-}
-
 static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                                        u8 *cmd)
 {
@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
        case SECURITY_PROTOCOL_OUT:
                retcode = nvme_trans_security_protocol(ns, hdr, cmd);
                break;
-       case START_STOP:
-               retcode = nvme_trans_start_stop(ns, hdr, cmd);
-               break;
        case SYNCHRONIZE_CACHE:
                retcode = nvme_trans_synchronize_cache(ns, hdr);
                break;
index ec1ad2aa0a4ca941e8fe51db94cc7ffa452c1693..95ae52390478fe62fdb59605ee2c7a6d0583a919 100644 (file)
@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
 {
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
-       u64 val;
        u32 val32;
        u16 status = 0;
 
@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
                        (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
                break;
        case NVME_FEAT_KATO:
-               val = le64_to_cpu(req->cmd->prop_set.value);
-               val32 = val & 0xffff;
+               val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
                req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
                nvmet_set_result(req, req->sq->ctrl->kato);
                break;
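
For context: NVMe's Set Features command carries the Keep Alive Timeout in Command Dword 11, in milliseconds. Viewed through the SQE's cdw10[] array, CDW11 is cdw10[1], which is what the fix reads in place of the (wrong) Fabrics property-set value; condensed:

	u32 kato_ms = le32_to_cpu(req->cmd->common.cdw10[1]);	/* CDW11 */

	req->sq->ctrl->kato = DIV_ROUND_UP(kato_ms, 1000);	/* whole seconds */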
index bcb8ebeb01c5d26515c8047e4eed765dbdc4da45..4e8e6a22bce162a61eec428e9c435acb26b74046 100644 (file)
@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
        rport->lport = nport->lport;
        nport->rport = rport;
 
-       return ret ? ret : count;
+       return count;
 }
 
 
@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
        tport->lport = nport->lport;
        nport->tport = tport;
 
-       return ret ? ret : count;
+       return count;
 }
 
 
index 2d3b691f3fc4885414ae9234950d8e9c798e5fe6..038ac6934fe9d7f865f9711f24fecadc27071b65 100644 (file)
@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
        info->screen_size = resource_size(res);
        info->screen_base = devm_ioremap(&dev->dev, res->start,
                                         info->screen_size);
+       if (!info->screen_base) {
+               framebuffer_release(info);
+               return -ENOMEM;
+       }
+
        info->fbops = &cobalt_lcd_fbops;
        info->fix = cobalt_lcdfb_fix;
        info->fix.smem_start = res->start;
index 6254cee8f8f382bf8aa881426453bae189973d34..5db5d1340d69eccf475f0feac7f85665bd6aceb5 100644 (file)
@@ -328,6 +328,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        struct file *file = iocb->ki_filp;
        struct inode *inode = bdev_file_inode(file);
        struct block_device *bdev = I_BDEV(inode);
+       struct blk_plug plug;
        struct blkdev_dio *dio;
        struct bio *bio;
        bool is_read = (iov_iter_rw(iter) == READ);
@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
        dio->multi_bio = false;
        dio->should_dirty = is_read && (iter->type == ITER_IOVEC);
 
+       blk_start_plug(&plug);
        for (;;) {
                bio->bi_bdev = bdev;
                bio->bi_iter.bi_sector = pos >> 9;
@@ -394,6 +396,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
                submit_bio(bio);
                bio = bio_alloc(GFP_KERNEL, nr_pages);
        }
+       blk_finish_plug(&plug);
 
        if (!dio->is_sync)
                return -EIOCBQUEUED;
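
The plug added here batches the per-segment bios so they reach the block driver as one burst rather than one queue insertion per submit_bio(). The pattern in isolation, assuming process context (where plugging applies) and with more_bios()/next_bio() as hypothetical stand-ins:

	struct blk_plug plug;

	blk_start_plug(&plug);		/* bios queue on a per-task plug list */
	while (more_bios())
		submit_bio(next_bio());
	blk_finish_plug(&plug);		/* flush the whole batch to the device */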
index d21771fcf7d345ab4299cb7fa25881ffcc61ef52..0e87401cf33535b03a1d2aa9da6e919d8a56a906 100644 (file)
@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
                        head = page_buffers(page);
                        bh = head;
                        do {
-                               if (!buffer_mapped(bh))
+                               if (!buffer_mapped(bh) || (bh->b_blocknr < block))
                                        goto next;
                                if (bh->b_blocknr >= block + len)
                                        break;
index 6eeea1dcba41c2fa75c479ce1a3fa16f7cd2e7a5..95cd4c3b06c326708a3315d3da86bdb9aafd5469 100644 (file)
@@ -248,7 +248,8 @@ retry:
                goto out;
 
        if (fscrypt_dummy_context_enabled(inode)) {
-               memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
+               memset(raw_key, 0x42, keysize/2);
+               memset(raw_key+keysize/2, 0x24, keysize - (keysize/2));
                goto got_key;
        }
 
index 6ed7c2eebeec53c7656054d05061dc83a23ef1c0..d6cd7ea4851da877b13c7af306fd07f8468a54f4 100644 (file)
@@ -179,6 +179,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
                BUG_ON(1);
        }
 
+       /* No restrictions on file types which are never encrypted */
+       if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
+           !S_ISLNK(child->i_mode))
+               return 1;
+
        /* no restrictions if the parent directory is not encrypted */
        if (!parent->i_sb->s_cop->is_encrypted(parent))
                return 1;
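
Only regular files, directories and symlinks ever carry an encryption context, so the added check lets other inode types (device nodes, FIFOs, sockets) be linked or renamed without a policy comparison. The mode test in isolation:

	if (!S_ISREG(mode) && !S_ISDIR(mode) && !S_ISLNK(mode))
		return 1;	/* never encrypted: nothing to match */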
index a8732fbed381a45bbce44fcdf0731ccfdc1a09ba..5c74f60d0a5094dc0a27f27ae0acd41667414332 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
 }
 
+static int __dax_invalidate_mapping_entry(struct address_space *mapping,
+                                         pgoff_t index, bool trunc)
+{
+       int ret = 0;
+       void *entry;
+       struct radix_tree_root *page_tree = &mapping->page_tree;
+
+       spin_lock_irq(&mapping->tree_lock);
+       entry = get_unlocked_mapping_entry(mapping, index, NULL);
+       if (!entry || !radix_tree_exceptional_entry(entry))
+               goto out;
+       if (!trunc &&
+           (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+            radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
+               goto out;
+       radix_tree_delete(page_tree, index);
+       mapping->nrexceptional--;
+       ret = 1;
+out:
+       put_unlocked_mapping_entry(mapping, index, entry);
+       spin_unlock_irq(&mapping->tree_lock);
+       return ret;
+}
 /*
  * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
  * entry to get unlocked before deleting it.
  */
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
 {
-       void *entry;
+       int ret = __dax_invalidate_mapping_entry(mapping, index, true);
 
-       spin_lock_irq(&mapping->tree_lock);
-       entry = get_unlocked_mapping_entry(mapping, index, NULL);
        /*
         * This gets called from truncate / punch_hole path. As such, the caller
         * must hold locks protecting against concurrent modifications of the
@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
         * caller has seen exceptional entry for this index, we better find it
         * at that index as well...
         */
-       if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
-               spin_unlock_irq(&mapping->tree_lock);
-               return 0;
-       }
-       radix_tree_delete(&mapping->page_tree, index);
+       WARN_ON_ONCE(!ret);
+       return ret;
+}
+
+/*
+ * Invalidate exceptional DAX entry if easily possible. This handles DAX
+ * entries for invalidate_inode_pages() so we evict the entry only if we can
+ * do so without blocking.
+ */
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
+{
+       int ret = 0;
+       void *entry, **slot;
+       struct radix_tree_root *page_tree = &mapping->page_tree;
+
+       spin_lock_irq(&mapping->tree_lock);
+       entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
+       if (!entry || !radix_tree_exceptional_entry(entry) ||
+           slot_locked(mapping, slot))
+               goto out;
+       if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
+           radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
+               goto out;
+       radix_tree_delete(page_tree, index);
        mapping->nrexceptional--;
+       ret = 1;
+out:
        spin_unlock_irq(&mapping->tree_lock);
-       dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+       if (ret)
+               dax_wake_mapping_entry_waiter(mapping, index, entry, true);
+       return ret;
+}
 
-       return 1;
+/*
+ * Invalidate exceptional DAX entry if it is clean.
+ */
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+                                     pgoff_t index)
+{
+       return __dax_invalidate_mapping_entry(mapping, index, false);
 }
 
 /*
@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
  * otherwise it will simply fall out of the page cache under memory
  * pressure without ever having been dirtied.
  */
-static int dax_load_hole(struct address_space *mapping, void *entry,
+static int dax_load_hole(struct address_space *mapping, void **entry,
                         struct vm_fault *vmf)
 {
        struct page *page;
+       int ret;
 
        /* Hole page already exists? Return it...  */
-       if (!radix_tree_exceptional_entry(entry)) {
-               vmf->page = entry;
-               return VM_FAULT_LOCKED;
+       if (!radix_tree_exceptional_entry(*entry)) {
+               page = *entry;
+               goto out;
        }
 
        /* This will replace locked radix tree entry with a hole page */
@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
                                   vmf->gfp_mask | __GFP_ZERO);
        if (!page)
                return VM_FAULT_OOM;
+ out:
        vmf->page = page;
-       return VM_FAULT_LOCKED;
+       ret = finish_fault(vmf);
+       vmf->page = NULL;
+       *entry = page;
+       if (!ret) {
+               /* Grab reference for PTE that is now referencing the page */
+               get_page(page);
+               return VM_FAULT_NOPAGE;
+       }
+       return ret;
 }
 
 static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
@@ -934,6 +995,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
        if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
                return -EIO;
 
+       /*
+        * Write can allocate a block for an area which has a hole page mapped
+        * into page tables. We have to tear down these mappings so that data
+        * written by write(2) is visible in mmap.
+        */
+       if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
+               invalidate_inode_pages2_range(inode->i_mapping,
+                                             pos >> PAGE_SHIFT,
+                                             (end - 1) >> PAGE_SHIFT);
+       }
+
        while (pos < end) {
                unsigned offset = pos & (PAGE_SIZE - 1);
                struct blk_dax_ctl dax = { 0 };
@@ -992,23 +1064,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
        if (iov_iter_rw(iter) == WRITE)
                flags |= IOMAP_WRITE;
 
-       /*
-        * Yes, even DAX files can have page cache attached to them:  A zeroed
-        * page is inserted into the pagecache when we have to serve a write
-        * fault on a hole.  It should never be dirtied and can simply be
-        * dropped from the pagecache once we get real data for the page.
-        *
-        * XXX: This is racy against mmap, and there's nothing we can do about
-        * it. We'll eventually need to shift this down even further so that
-        * we can check if we allocated blocks over a hole first.
-        */
-       if (mapping->nrpages) {
-               ret = invalidate_inode_pages2_range(mapping,
-                               pos >> PAGE_SHIFT,
-                               (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
-               WARN_ON_ONCE(ret);
-       }
-
        while (iov_iter_count(iter)) {
                ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
                                iter, dax_iomap_actor);
@@ -1023,6 +1078,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 }
 EXPORT_SYMBOL_GPL(dax_iomap_rw);
 
+static int dax_fault_return(int error)
+{
+       if (error == 0)
+               return VM_FAULT_NOPAGE;
+       if (error == -ENOMEM)
+               return VM_FAULT_OOM;
+       return VM_FAULT_SIGBUS;
+}
+
 /**
  * dax_iomap_fault - handle a page fault on a DAX file
  * @vma: The virtual memory area where the fault occurred
@@ -1055,12 +1119,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
        if (pos >= i_size_read(inode))
                return VM_FAULT_SIGBUS;
 
-       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
-       if (IS_ERR(entry)) {
-               error = PTR_ERR(entry);
-               goto out;
-       }
-
        if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
                flags |= IOMAP_WRITE;
 
@@ -1071,9 +1129,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
         */
        error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
        if (error)
-               goto unlock_entry;
+               return dax_fault_return(error);
        if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
-               error = -EIO;           /* fs corruption? */
+               vmf_ret = dax_fault_return(-EIO);       /* fs corruption? */
+               goto finish_iomap;
+       }
+
+       entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
+       if (IS_ERR(entry)) {
+               vmf_ret = dax_fault_return(PTR_ERR(entry));
                goto finish_iomap;
        }
 
@@ -1096,13 +1160,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                }
 
                if (error)
-                       goto finish_iomap;
+                       goto error_unlock_entry;
 
                __SetPageUptodate(vmf->cow_page);
                vmf_ret = finish_fault(vmf);
                if (!vmf_ret)
                        vmf_ret = VM_FAULT_DONE_COW;
-               goto finish_iomap;
+               goto unlock_entry;
        }
 
        switch (iomap.type) {
@@ -1114,12 +1178,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                }
                error = dax_insert_mapping(mapping, iomap.bdev, sector,
                                PAGE_SIZE, &entry, vma, vmf);
+               /* -EBUSY is fine, somebody else faulted on the same PTE */
+               if (error == -EBUSY)
+                       error = 0;
                break;
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (!(vmf->flags & FAULT_FLAG_WRITE)) {
-                       vmf_ret = dax_load_hole(mapping, entry, vmf);
-                       break;
+                       vmf_ret = dax_load_hole(mapping, &entry, vmf);
+                       goto unlock_entry;
                }
                /*FALLTHRU*/
        default:
@@ -1128,31 +1195,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                break;
        }
 
+ error_unlock_entry:
+       vmf_ret = dax_fault_return(error) | major;
+ unlock_entry:
+       put_locked_mapping_entry(mapping, vmf->pgoff, entry);
  finish_iomap:
        if (ops->iomap_end) {
-               if (error || (vmf_ret & VM_FAULT_ERROR)) {
-                       /* keep previous error */
-                       ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
-                                       &iomap);
-               } else {
-                       error = ops->iomap_end(inode, pos, PAGE_SIZE,
-                                       PAGE_SIZE, flags, &iomap);
-               }
-       }
- unlock_entry:
-       if (vmf_ret != VM_FAULT_LOCKED || error)
-               put_locked_mapping_entry(mapping, vmf->pgoff, entry);
- out:
-       if (error == -ENOMEM)
-               return VM_FAULT_OOM | major;
-       /* -EBUSY is fine, somebody else faulted on the same PTE */
-       if (error < 0 && error != -EBUSY)
-               return VM_FAULT_SIGBUS | major;
-       if (vmf_ret) {
-               WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
-               return vmf_ret;
+               int copied = PAGE_SIZE;
+
+               if (vmf_ret & VM_FAULT_ERROR)
+                       copied = 0;
+               /*
+                * The fault is done by now and there's no way back (another
+                * thread may already be happily using the PTE we have installed).
+                * Just ignore error from ->iomap_end since we cannot do much
+                * with it.
+                */
+               ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
        }
-       return VM_FAULT_NOPAGE | major;
+       return vmf_ret;
 }
 EXPORT_SYMBOL_GPL(dax_iomap_fault);
 
@@ -1276,16 +1337,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
                goto fallback;
 
-       /*
-        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
-        * PMD or a HZP entry.  If it can't (because a 4k page is already in
-        * the tree, for instance), it will return -EEXIST and we just fall
-        * back to 4k entries.
-        */
-       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
-       if (IS_ERR(entry))
-               goto fallback;
-
        /*
         * Note that we don't use iomap_apply here.  We aren't doing I/O, only
         * setting up a mapping, so really we're using iomap_begin() as a way
@@ -1294,10 +1345,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        pos = (loff_t)pgoff << PAGE_SHIFT;
        error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
        if (error)
-               goto unlock_entry;
+               goto fallback;
+
        if (iomap.offset + iomap.length < pos + PMD_SIZE)
                goto finish_iomap;
 
+       /*
+        * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
+        * PMD or a HZP entry.  If it can't (because a 4k page is already in
+        * the tree, for instance), it will return -EEXIST and we just fall
+        * back to 4k entries.
+        */
+       entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
+       if (IS_ERR(entry))
+               goto finish_iomap;
+
        vmf.pgoff = pgoff;
        vmf.flags = flags;
        vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;
@@ -1310,7 +1372,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        case IOMAP_UNWRITTEN:
        case IOMAP_HOLE:
                if (WARN_ON_ONCE(write))
-                       goto finish_iomap;
+                       goto unlock_entry;
                result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
                                &entry);
                break;
@@ -1319,20 +1381,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
                break;
        }
 
+ unlock_entry:
+       put_locked_mapping_entry(mapping, pgoff, entry);
  finish_iomap:
        if (ops->iomap_end) {
-               if (result == VM_FAULT_FALLBACK) {
-                       ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
-                                       &iomap);
-               } else {
-                       error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
-                                       iomap_flags, &iomap);
-                       if (error)
-                               result = VM_FAULT_FALLBACK;
-               }
+               int copied = PMD_SIZE;
+
+               if (result == VM_FAULT_FALLBACK)
+                       copied = 0;
+               /*
+                * The fault is done by now and there's no way back (another
+                * thread may already be happily using the PMD we have installed).
+                * Just ignore error from ->iomap_end since we cannot do much
+                * with it.
+                */
+               ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
+                               &iomap);
        }
- unlock_entry:
-       put_locked_mapping_entry(mapping, pgoff, entry);
  fallback:
        if (result == VM_FAULT_FALLBACK) {
                split_huge_pmd(vma, pmd, address);
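
A reading aid for the dax.c changes as a whole: the three eviction helpers split by caller, and every fault-path error now funnels through dax_fault_return() before any radix-tree entry lock is taken. Roughly:

	/*
	 * truncate:                  dax_delete_mapping_entry()
	 *                            - always removes the entry (warns if absent)
	 * invalidate_inode_pages():  dax_invalidate_mapping_entry()
	 *                            - removes only unlocked, clean entries
	 * invalidate_inode_pages2(): dax_invalidate_mapping_entry_sync()
	 *                            - waits for the entry lock, removes if clean
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return dax_fault_return(error);	/* no entry lock held yet */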
index 0093ea2512a85809e16605088074a8335513e81c..f073bfca694b9982b8bc23e8f0e00be6bef075a7 100644 (file)
@@ -751,9 +751,8 @@ static int ext2_get_blocks(struct inode *inode,
                        mutex_unlock(&ei->truncate_mutex);
                        goto cleanup;
                }
-       } else {
-               *new = true;
        }
+       *new = true;
 
        ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
        mutex_unlock(&ei->truncate_mutex);
index b5f184493c57b0fd91cfc5f6c0633577ce770884..d663d3d7c81cb7fdff0f33f1903e9ed4d1f77f9a 100644 (file)
@@ -258,7 +258,6 @@ out:
 static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
        int result;
-       handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = vmf->flags & FAULT_FLAG_WRITE;
@@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
-                                               EXT4_DATA_TRANS_BLOCKS(sb));
-       } else
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-
-       if (IS_ERR(handle))
-               result = VM_FAULT_SIGBUS;
-       else
-               result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
-
-       if (write) {
-               if (!IS_ERR(handle))
-                       ext4_journal_stop(handle);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+       }
+       down_read(&EXT4_I(inode)->i_mmap_sem);
+       result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
+       up_read(&EXT4_I(inode)->i_mmap_sem);
+       if (write)
                sb_end_pagefault(sb);
-       } else
-               up_read(&EXT4_I(inode)->i_mmap_sem);
 
        return result;
 }
@@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
                                                pmd_t *pmd, unsigned int flags)
 {
        int result;
-       handle_t *handle = NULL;
        struct inode *inode = file_inode(vma->vm_file);
        struct super_block *sb = inode->i_sb;
        bool write = flags & FAULT_FLAG_WRITE;
@@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
        if (write) {
                sb_start_pagefault(sb);
                file_update_time(vma->vm_file);
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-               handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
-                               ext4_chunk_trans_blocks(inode,
-                                                       PMD_SIZE / PAGE_SIZE));
-       } else
-               down_read(&EXT4_I(inode)->i_mmap_sem);
-
-       if (IS_ERR(handle))
-               result = VM_FAULT_SIGBUS;
-       else {
-               result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
-                                            &ext4_iomap_ops);
        }
-
-       if (write) {
-               if (!IS_ERR(handle))
-                       ext4_journal_stop(handle);
-               up_read(&EXT4_I(inode)->i_mmap_sem);
+       down_read(&EXT4_I(inode)->i_mmap_sem);
+       result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
+                                    &ext4_iomap_ops);
+       up_read(&EXT4_I(inode)->i_mmap_sem);
+       if (write)
                sb_end_pagefault(sb);
-       } else
-               up_read(&EXT4_I(inode)->i_mmap_sem);
 
        return result;
 }
index e5ebc37704608a336dd8690d55f06389cd0173bd..d346d42c54d1590250040f0b36c05367287e7bf5 100644 (file)
@@ -256,6 +256,9 @@ xfs_ag_resv_init(
                        goto out;
        }
 
+       ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
+              xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
+              pag->pagf_freeblks + pag->pagf_flcount);
 out:
        return error;
 }
index 6fb2215f8ff77bf0342e5f61dd6d060987e13d77..50add5272807e95374c3043819205f68c5ec55fa 100644 (file)
@@ -409,13 +409,14 @@ xfs_refcountbt_calc_size(
  */
 xfs_extlen_t
 xfs_refcountbt_max_size(
-       struct xfs_mount        *mp)
+       struct xfs_mount        *mp,
+       xfs_agblock_t           agblocks)
 {
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_refc_mxr[0] == 0)
                return 0;
 
-       return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks);
+       return xfs_refcountbt_calc_size(mp, agblocks);
 }
 
 /*
@@ -430,22 +431,24 @@ xfs_refcountbt_calc_reserves(
 {
        struct xfs_buf          *agbp;
        struct xfs_agf          *agf;
+       xfs_agblock_t           agblocks;
        xfs_extlen_t            tree_len;
        int                     error;
 
        if (!xfs_sb_version_hasreflink(&mp->m_sb))
                return 0;
 
-       *ask += xfs_refcountbt_max_size(mp);
 
        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
        if (error)
                return error;
 
        agf = XFS_BUF_TO_AGF(agbp);
+       agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_refcount_blocks);
        xfs_buf_relse(agbp);
 
+       *ask += xfs_refcountbt_max_size(mp, agblocks);
        *used += tree_len;
 
        return error;
index 3be7768bd51a1c0ebd8c2ccc6e930a730bd39b54..9db008b955b7ed68bb62d8654073da0302668171 100644 (file)
@@ -66,7 +66,8 @@ extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);
 
 extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
                unsigned long long len);
-extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp);
+extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
+               xfs_agblock_t agblocks);
 
 extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
                xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
index de25771764bac313ec514a882d6069de1063e5cc..74e5a54bc428fa27d485d88e871fdd77f8494252 100644 (file)
@@ -550,13 +550,14 @@ xfs_rmapbt_calc_size(
  */
 xfs_extlen_t
 xfs_rmapbt_max_size(
-       struct xfs_mount        *mp)
+       struct xfs_mount        *mp,
+       xfs_agblock_t           agblocks)
 {
        /* Bail out if we're uninitialized, which can happen in mkfs. */
        if (mp->m_rmap_mxr[0] == 0)
                return 0;
 
-       return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks);
+       return xfs_rmapbt_calc_size(mp, agblocks);
 }
 
 /*
@@ -571,25 +572,24 @@ xfs_rmapbt_calc_reserves(
 {
        struct xfs_buf          *agbp;
        struct xfs_agf          *agf;
-       xfs_extlen_t            pool_len;
+       xfs_agblock_t           agblocks;
        xfs_extlen_t            tree_len;
        int                     error;
 
        if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
                return 0;
 
-       /* Reserve 1% of the AG or enough for 1 block per record. */
-       pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp));
-       *ask += pool_len;
-
        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
        if (error)
                return error;
 
        agf = XFS_BUF_TO_AGF(agbp);
+       agblocks = be32_to_cpu(agf->agf_length);
        tree_len = be32_to_cpu(agf->agf_rmap_blocks);
        xfs_buf_relse(agbp);
 
+       /* Reserve 1% of the AG or enough for 1 block per record. */
+       *ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
        *used += tree_len;
 
        return error;
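
Both btree reservation paths now size against the AG's real length from the AGF (agf_length) rather than sb_agblocks; the two differ for the last AG when the filesystem size is not a multiple of the AG size, so the old code could over-reserve there. The corrected computation, condensed from the rmap hunk:

	agblocks = be32_to_cpu(agf->agf_length);	/* actual AG size */
	*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));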
index 2a9ac472fb15a2408ca6d58addc6a5d439b61fe9..19c08e93304954d62c1c1dcdf35a36a49f38ee71 100644 (file)
@@ -60,7 +60,8 @@ extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);
 
 extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
                unsigned long long len);
-extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp);
+extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
+               xfs_agblock_t agblocks);
 
 extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
                xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);
index 93d12fa2670d53bf8b6bf23c51325d48467b8ab2..242e8091296daff7a029b4233d53db8241ecbcd7 100644 (file)
@@ -631,6 +631,20 @@ xfs_growfs_data_private(
        xfs_set_low_space_thresholds(mp);
        mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);
 
+       /*
+        * If we expanded the last AG, free the per-AG reservation
+        * so we can reinitialize it with the new size.
+        */
+       if (new) {
+               struct xfs_perag        *pag;
+
+               pag = xfs_perag_get(mp, agno);
+               error = xfs_ag_resv_free(pag);
+               xfs_perag_put(pag);
+               if (error)
+                       goto out;
+       }
+
        /* Reserve AG metadata blocks. */
        error = xfs_fs_reserve_ag_blocks(mp);
        if (error && error != -ENOSPC)
index ff4d6311c7f4b4912e5bc1c6bdcead81ab855d81..70ca4f608321b9ac9d8123bc2ddc5f216929211c 100644 (file)
@@ -1597,7 +1597,8 @@ xfs_inode_free_cowblocks(
         * If the mapping is dirty or under writeback we cannot touch the
         * CoW fork.  Leave it alone if we're in the midst of a directio.
         */
-       if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
+       if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
+           mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
            mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
            atomic_read(&VFS_I(ip)->i_dio_count))
                return 0;
index fe86a668a57e70419e7fc6e5f126c0aa97cecb96..6e4c7446c3d4561f85d86686d5b4a40bc4cd0ce6 100644 (file)
@@ -526,13 +526,14 @@ xfs_cui_recover(
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
        error = xfs_defer_finish(&tp, &dfops, NULL);
        if (error)
-               goto abort_error;
+               goto abort_defer;
        set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
        error = xfs_trans_commit(tp);
        return error;
 
 abort_error:
        xfs_refcount_finish_one_cleanup(tp, rcur, error);
+abort_defer:
        xfs_defer_cancel(&dfops);
        xfs_trans_cancel(tp);
        return error;
index 276d3023d60f8201b635ae1f0c2ccbf26aac74fd..de6195e3891096316e51fd4efcd49ff091024a4a 100644 (file)
@@ -396,7 +396,7 @@ max_retries_show(
        int             retries;
        struct xfs_error_cfg *cfg = to_error_cfg(kobject);
 
-       if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
+       if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
                retries = -1;
        else
                retries = cfg->max_retries;
@@ -422,7 +422,7 @@ max_retries_store(
                return -EINVAL;
 
        if (val == -1)
-               cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
+               cfg->max_retries = XFS_ERR_RETRY_FOREVER;
        else
                cfg->max_retries = val;
        return count;
index f97bcfe794724d4fb757297851fe218268d0c86b..24ad711739955e573aefea713852b2f49a8fed85 100644 (file)
@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
                        struct iomap_ops *ops);
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
+int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
+                                     pgoff_t index);
 void dax_wake_mapping_entry_waiter(struct address_space *mapping,
                pgoff_t index, void *entry, bool wake_all);
 
index e0341af6950e2116a43b3b0281f57fea8099c06f..76f39754e7b0299df616bc3cb909f9a35fce9ea1 100644 (file)
@@ -146,15 +146,6 @@ enum {
        DISK_EVENT_EJECT_REQUEST                = 1 << 1, /* eject requested */
 };
 
-#define BLK_SCSI_MAX_CMDS      (256)
-#define BLK_SCSI_CMD_PER_LONG  (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
-
-struct blk_scsi_cmd_filter {
-       unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-       unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-       struct kobject kobj;
-};
-
 struct disk_part_tbl {
        struct rcu_head rcu_head;
        int len;
index c56b39890a412abfec4acc31e404781215ae3ff6..6b5818d6de322f8b5898e082ab4b3936042beb5a 100644 (file)
  */
 enum pageflags {
        PG_locked,              /* Page is locked. Don't touch. */
-       PG_waiters,             /* Page has waiters, check its waitqueue */
        PG_error,
        PG_referenced,
        PG_uptodate,
        PG_dirty,
        PG_lru,
        PG_active,
+       PG_waiters,             /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
        PG_slab,
        PG_owner_priv_1,        /* Owner use. If pagecache, fs may use*/
        PG_arch_1,
index 82f26cde830c4b70df30cfa47c7e21dbe6d05a7f..d0e4d1002059360e50254ae2c87dc8f7a87a2dff 100644 (file)
@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
 }
 EXPORT_SYMBOL_GPL(add_page_wait_queue);
 
+#ifndef clear_bit_unlock_is_negative_byte
+
+/*
+ * PG_waiters is the high bit in the same byte as PG_locked.
+ *
+ * On x86 (and on many other architectures), we can clear PG_locked and
+ * test the sign bit at the same time. But if the architecture does
+ * not support that special operation, we just do this all by hand
+ * instead.
+ *
+ * The read of PG_waiters has to be after (or concurrently with) PG_locked
+ * being cleared, but a memory barrier should be unnecessary since it is
+ * in the same byte as PG_locked.
+ */
+static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
+{
+       clear_bit_unlock(nr, mem);
+       /* smp_mb__after_atomic(); */
+       return test_bit(PG_waiters, mem);
+}
+
+#endif
+
 /**
  * unlock_page - unlock a locked page
  * @page: the page
@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
  * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The mb is necessary to enforce ordering between the clear_bit and the read
- * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
+ * Note that this depends on PG_waiters being the sign bit in the byte
+ * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
+ * clear the PG_locked bit and test PG_waiters at the same time fairly
+ * portably (architectures that do LL/SC can test any bit, while x86 can
+ * test the sign bit).
  */
 void unlock_page(struct page *page)
 {
+       BUILD_BUG_ON(PG_waiters != 7);
        page = compound_head(page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
-       clear_bit_unlock(PG_locked, &page->flags);
-       smp_mb__after_atomic();
-       wake_up_page(page, PG_locked);
+       if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
+               wake_up_page_bit(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
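
The generic fallback above needs two operations; the point of the hook is that some architectures can fuse them. A hedged, portable illustration of the fused form using GCC atomic builtins (not kernel API, purely to show clearing the lock bit and testing the sign bit of the same byte in one atomic op):

	#include <stdbool.h>

	static inline bool clear_lock_test_waiters(unsigned char *byte)
	{
		/* bit 0 plays PG_locked, bit 7 (the sign bit) plays PG_waiters */
		unsigned char old = __atomic_fetch_and(byte, 0xfe, __ATOMIC_RELEASE);

		return (old & 0x80) != 0;	/* waiters bit was already set */
	}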
 
index fd97f1dbce290f39e1d0d0367006df954f20e8f1..dd7b24e083c5b1f76851eb0b5e3359dde92d910e 100644 (file)
 #include <linux/rmap.h>
 #include "internal.h"
 
-static void clear_exceptional_entry(struct address_space *mapping,
-                                   pgoff_t index, void *entry)
+static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
+                              void *entry)
 {
        struct radix_tree_node *node;
        void **slot;
 
-       /* Handled by shmem itself */
-       if (shmem_mapping(mapping))
-               return;
-
-       if (dax_mapping(mapping)) {
-               dax_delete_mapping_entry(mapping, index);
-               return;
-       }
        spin_lock_irq(&mapping->tree_lock);
        /*
         * Regular page slots are stabilized by the page lock even
@@ -55,6 +47,56 @@ unlock:
        spin_unlock_irq(&mapping->tree_lock);
 }
 
+/*
+ * Unconditionally remove exceptional entry. Usually called from truncate path.
+ */
+static void truncate_exceptional_entry(struct address_space *mapping,
+                                      pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return;
+
+       if (dax_mapping(mapping)) {
+               dax_delete_mapping_entry(mapping, index);
+               return;
+       }
+       clear_shadow_entry(mapping, index, entry);
+}
+
+/*
+ * Invalidate exceptional entry if easily possible. This handles exceptional
+ * entries for invalidate_inode_pages(); for DAX it evicts only unlocked and
+ * clean entries.
+ */
+static int invalidate_exceptional_entry(struct address_space *mapping,
+                                       pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return 1;
+       if (dax_mapping(mapping))
+               return dax_invalidate_mapping_entry(mapping, index);
+       clear_shadow_entry(mapping, index, entry);
+       return 1;
+}
+
+/*
+ * Invalidate exceptional entry if clean. This handles exceptional entries for
+ * invalidate_inode_pages2(); for DAX it evicts only clean entries.
+ */
+static int invalidate_exceptional_entry2(struct address_space *mapping,
+                                        pgoff_t index, void *entry)
+{
+       /* Handled by shmem itself */
+       if (shmem_mapping(mapping))
+               return 1;
+       if (dax_mapping(mapping))
+               return dax_invalidate_mapping_entry_sync(mapping, index);
+       clear_shadow_entry(mapping, index, entry);
+       return 1;
+}
+
 /**
  * do_invalidatepage - invalidate part or all of a page
  * @page: the page which is affected
@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               truncate_exceptional_entry(mapping, index,
+                                                          page);
                                continue;
                        }
 
@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
                        }
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               truncate_exceptional_entry(mapping, index,
+                                                          page);
                                continue;
                        }
 
@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               invalidate_exceptional_entry(mapping, index,
+                                                            page);
                                continue;
                        }
 
@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                break;
 
                        if (radix_tree_exceptional_entry(page)) {
-                               clear_exceptional_entry(mapping, index, page);
+                               if (!invalidate_exceptional_entry2(mapping,
+                                                                  index, page))
+                                       ret = -EBUSY;
                                continue;
                        }
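
With the split, each public invalidation entry point gets eviction semantics matching its contract, and invalidate_inode_pages2_range() now reports busy DAX entries instead of silently skipping them:

	/*
	 * truncate_inode_pages*()    -> truncate_exceptional_entry()   (always)
	 * invalidate_mapping_pages() -> invalidate_exceptional_entry() (best effort)
	 * invalidate_inode_pages2*() -> invalidate_exceptional_entry2()
	 *                               (clean entries only; failure => -EBUSY)
	 */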
 
index 8ca6a929bf1255cb432fd4bd59d34345d62e09c4..95087e6e8258366af95579bb308d1a6e18266f0e 100644 (file)
@@ -399,7 +399,7 @@ bridged_dnat:
                                br_nf_hook_thresh(NF_BR_PRE_ROUTING,
                                                  net, sk, skb, skb->dev,
                                                  NULL,
-                                                 br_nf_pre_routing_finish);
+                                                 br_nf_pre_routing_finish_bridge);
                                return 0;
                        }
                        ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
index f465bad2ef2c69fb6f14010f53a702f2b1546230..fb55327dcfeabdaf3eeecc3a8d176ae215612649 100644 (file)
@@ -102,7 +102,6 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
        }
        msg = nla_data(nla);
        memset(msg, 0, al);
-       genlmsg_end(skb, msg_header);
        goto out;
 
 err:
@@ -112,6 +111,13 @@ out:
        swap(data->skb, skb);
        spin_unlock_irqrestore(&data->lock, flags);
 
+       if (skb) {
+               struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
+               struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);
+
+               genlmsg_end(skb, genlmsg_data(gnlh));
+       }
+
        return skb;
 }
 
index d6447dc1037151495d88064938dfb4e255377416..fe4e1531976c3a36127b6ad4af33f24534af4c52 100644 (file)
@@ -468,8 +468,9 @@ ip_proto_again:
                        if (hdr->flags & GRE_ACK)
                                offset += sizeof(((struct pptp_gre_header *)0)->ack);
 
-                       ppp_hdr = skb_header_pointer(skb, nhoff + offset,
-                                                    sizeof(_ppp_hdr), _ppp_hdr);
+                       ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
+                                                    sizeof(_ppp_hdr),
+                                                    data, hlen, _ppp_hdr);
                        if (!ppp_hdr)
                                goto out_bad;
 
index 3ff8938893ec85311012b55a27de10d9368b50f1..eae0332b0e8c1f861ce629ed9ce3ddc45802a6b8 100644 (file)
@@ -85,7 +85,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
        if (tb)
                return tb;
 
-       if (id == RT_TABLE_LOCAL)
+       if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules)
                alias = fib_new_table(net, RT_TABLE_MAIN);
 
        tb = fib_trie_table(id, alias);
index 68d622133f5386e621148da7330dcc747d186c6c..5b15459955f84cfc26dd2b12f129b1ee4014e62b 100644 (file)
@@ -219,9 +219,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
 static void igmp_gq_start_timer(struct in_device *in_dev)
 {
        int tv = prandom_u32() % in_dev->mr_maxdelay;
+       unsigned long exp = jiffies + tv + 2;
+
+       if (in_dev->mr_gq_running &&
+           time_after_eq(exp, (in_dev->mr_gq_timer).expires))
+               return;
 
        in_dev->mr_gq_running = 1;
-       if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
+       if (!mod_timer(&in_dev->mr_gq_timer, exp))
                in_dev_hold(in_dev);
 }
 
index 21db00d0362bb60d48aed2c900b857f86cef5793..a6b8c1a4102ba7ab07efbcf504fa7ca4025c6f19 100644 (file)
@@ -144,7 +144,7 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
        rcu_read_lock_bh();
        c = __clusterip_config_find(net, clusterip);
        if (c) {
-               if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+               if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
                        c = NULL;
                else if (entry)
                        atomic_inc(&c->entries);
@@ -166,14 +166,15 @@ clusterip_config_init_nodelist(struct clusterip_config *c,
 
 static struct clusterip_config *
 clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
-                       struct net_device *dev)
+                     struct net_device *dev)
 {
+       struct net *net = dev_net(dev);
        struct clusterip_config *c;
-       struct clusterip_net *cn = net_generic(dev_net(dev), clusterip_net_id);
+       struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
        c = kzalloc(sizeof(*c), GFP_ATOMIC);
        if (!c)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        c->dev = dev;
        c->clusterip = ip;
@@ -185,6 +186,17 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
        atomic_set(&c->refcount, 1);
        atomic_set(&c->entries, 1);
 
+       spin_lock_bh(&cn->lock);
+       if (__clusterip_config_find(net, ip)) {
+               spin_unlock_bh(&cn->lock);
+               kfree(c);
+
+               return ERR_PTR(-EBUSY);
+       }
+
+       list_add_rcu(&c->list, &cn->configs);
+       spin_unlock_bh(&cn->lock);
+
 #ifdef CONFIG_PROC_FS
        {
                char buffer[16];
@@ -195,16 +207,16 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
                                          cn->procdir,
                                          &clusterip_proc_fops, c);
                if (!c->pde) {
+                       spin_lock_bh(&cn->lock);
+                       list_del_rcu(&c->list);
+                       spin_unlock_bh(&cn->lock);
                        kfree(c);
-                       return NULL;
+
+                       return ERR_PTR(-ENOMEM);
                }
        }
 #endif
 
-       spin_lock_bh(&cn->lock);
-       list_add_rcu(&c->list, &cn->configs);
-       spin_unlock_bh(&cn->lock);
-
        return c;
 }
 
@@ -410,9 +422,9 @@ static int clusterip_tg_check(const struct xt_tgchk_param *par)
 
                        config = clusterip_config_init(cipinfo,
                                                        e->ip.dst.s_addr, dev);
-                       if (!config) {
+                       if (IS_ERR(config)) {
                                dev_put(dev);
-                               return -ENOMEM;
+                               return PTR_ERR(config);
                        }
                        dev_mc_add(config->dev, config->clustermac);
                }
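
The return-type change follows the kernel's ERR_PTR convention, letting the caller distinguish the existing -ENOMEM case from the new -EBUSY duplicate-config case through a single pointer return. The convention in miniature, with a hypothetical object type:

	struct foo *f = foo_create();	/* real pointer or encoded errno */

	if (IS_ERR(f))
		return PTR_ERR(f);	/* decode: -ENOMEM, -EBUSY, ... */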
index f4b4a4a5f4ba740f4a290e2394034fe335941abe..d82042c8d8fd4b38eac12a58eb634438aab726a7 100644 (file)
@@ -189,12 +189,12 @@ static int vti6_tnl_create2(struct net_device *dev)
        struct vti6_net *ip6n = net_generic(net, vti6_net_id);
        int err;
 
+       dev->rtnl_link_ops = &vti6_link_ops;
        err = register_netdevice(dev);
        if (err < 0)
                goto out;
 
        strcpy(t->parms.name, dev->name);
-       dev->rtnl_link_ops = &vti6_link_ops;
 
        dev_hold(dev);
        vti6_tnl_link(ip6n, t);
index a019a87e58ee8151620eb3d8500979d02dba54c1..0db5f9782265ebb033f10d07da815495e8a7d278 100644 (file)
@@ -2115,7 +2115,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
         * is called on error from nf_tables_newrule().
         */
        expr = nft_expr_first(rule);
-       while (expr->ops && expr != nft_expr_last(rule)) {
+       while (expr != nft_expr_last(rule) && expr->ops) {
                nf_tables_expr_destroy(ctx, expr);
                expr = nft_expr_next(expr);
        }
index 36d2b10965464cd8abc3ad57326aefb00b003971..7d699bbd45b0eaae1574a094f54a85d8219d390a 100644 (file)
@@ -250,6 +250,22 @@ static int nft_payload_l4csum_update(const struct nft_pktinfo *pkt,
        return 0;
 }
 
+static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+                                __wsum fsum, __wsum tsum, int csum_offset)
+{
+       __sum16 sum;
+
+       if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               return -1;
+
+       nft_csum_replace(&sum, fsum, tsum);
+       if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
+           skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               return -1;
+
+       return 0;
+}
+
 static void nft_payload_set_eval(const struct nft_expr *expr,
                                 struct nft_regs *regs,
                                 const struct nft_pktinfo *pkt)
@@ -259,7 +275,6 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
        const u32 *src = &regs->data[priv->sreg];
        int offset, csum_offset;
        __wsum fsum, tsum;
-       __sum16 sum;
 
        switch (priv->base) {
        case NFT_PAYLOAD_LL_HEADER:
@@ -282,18 +297,14 @@ static void nft_payload_set_eval(const struct nft_expr *expr,
        csum_offset = offset + priv->csum_offset;
        offset += priv->offset;
 
-       if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+       if ((priv->csum_type == NFT_PAYLOAD_CSUM_INET || priv->csum_flags) &&
            (priv->base != NFT_PAYLOAD_TRANSPORT_HEADER ||
             skb->ip_summed != CHECKSUM_PARTIAL)) {
-               if (skb_copy_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
-                       goto err;
-
                fsum = skb_checksum(skb, offset, priv->len, 0);
                tsum = csum_partial(src, priv->len, 0);
-               nft_csum_replace(&sum, fsum, tsum);
 
-               if (!skb_make_writable(skb, csum_offset + sizeof(sum)) ||
-                   skb_store_bits(skb, csum_offset, &sum, sizeof(sum)) < 0)
+               if (priv->csum_type == NFT_PAYLOAD_CSUM_INET &&
+                   nft_payload_csum_inet(skb, src, fsum, tsum, csum_offset))
                        goto err;
 
                if (priv->csum_flags &&
index 3e19fa1230dc6b9274090257be97b91827699485..dbb6aaff67ec5c151f8c8c6333721421f8e85076 100644 (file)
@@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
 
        if (priv->queues_total > 1) {
                if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
-                       int cpu = smp_processor_id();
+                       int cpu = raw_smp_processor_id();
 
                        queue = priv->queuenum + cpu % priv->queues_total;
                } else {
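
smp_processor_id() emits a debug splat (under CONFIG_DEBUG_PREEMPT) when called from preemptible context; raw_smp_processor_id() skips that check. For CPU fanout the value only spreads load across queues, so a stale CPU number after a migration is harmless, which is what makes the raw variant acceptable here:

	int cpu = raw_smp_processor_id();	/* no preemption-debug check */

	queue = priv->queuenum + cpu % priv->queues_total;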
index bd6efc53f26d01d8c8ac246c36fa4b1cf970ce31..2d6fe3559912674385e7679557fc31ddeb901b38 100644 (file)
@@ -110,30 +110,32 @@ static int nft_quota_obj_init(const struct nlattr * const tb[],
 static int nft_quota_do_dump(struct sk_buff *skb, struct nft_quota *priv,
                             bool reset)
 {
+       u64 consumed, consumed_cap;
        u32 flags = priv->flags;
-       u64 consumed;
-
-       if (reset) {
-               consumed = atomic64_xchg(&priv->consumed, 0);
-               if (test_and_clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags))
-                       flags |= NFT_QUOTA_F_DEPLETED;
-       } else {
-               consumed = atomic64_read(&priv->consumed);
-       }
 
        /* Since we unconditionally increment consumed quota for each packet
         * that we see, don't go over the quota boundary in what we send to
         * userspace.
         */
-       if (consumed > priv->quota)
-               consumed = priv->quota;
+       consumed = atomic64_read(&priv->consumed);
+       if (consumed >= priv->quota) {
+               consumed_cap = priv->quota;
+               flags |= NFT_QUOTA_F_DEPLETED;
+       } else {
+               consumed_cap = consumed;
+       }
 
        if (nla_put_be64(skb, NFTA_QUOTA_BYTES, cpu_to_be64(priv->quota),
                         NFTA_QUOTA_PAD) ||
-           nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed),
+           nla_put_be64(skb, NFTA_QUOTA_CONSUMED, cpu_to_be64(consumed_cap),
                         NFTA_QUOTA_PAD) ||
            nla_put_be32(skb, NFTA_QUOTA_FLAGS, htonl(flags)))
                goto nla_put_failure;
+
+       if (reset) {
+               atomic64_sub(consumed, &priv->consumed);
+               clear_bit(NFT_QUOTA_DEPLETED_BIT, &priv->flags);
+       }
        return 0;
 
 nla_put_failure:
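
The reset path now subtracts the snapshot it reported instead of swapping the counter to zero, so packets accounted between the read and the reset are no longer lost. The idiom in isolation, with report() as a hypothetical stand-in for the netlink dump:

	u64 snap = atomic64_read(&counter);	/* value sent to userspace */

	report(snap);
	atomic64_sub(snap, &counter);		/* keep anything counted since */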
index 950fd2e64bb73b9f261188bba272ea7e7ec10249..12262c0cc6914e6a5eebac1b887b129a15fb7466 100644 (file)
@@ -39,6 +39,9 @@
 #include "hash-map.h"
 #endif
 
+#if BUILDING_GCC_VERSION >= 7000
+#include "memmodel.h"
+#endif
 #include "emit-rtl.h"
 #include "debug.h"
 #include "target.h"
@@ -91,6 +94,9 @@
 #include "tree-ssa-alias.h"
 #include "tree-ssa.h"
 #include "stringpool.h"
+#if BUILDING_GCC_VERSION >= 7000
+#include "tree-vrp.h"
+#endif
 #include "tree-ssanames.h"
 #include "print-tree.h"
 #include "tree-eh.h"
@@ -287,6 +293,22 @@ static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct c
        return NULL;
 }
 
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+       cgraph_node_ptr alias;
+
+       if (callback(node, data))
+               return true;
+
+       for (alias = node->same_body; alias; alias = alias->next) {
+               if (include_overwritable || cgraph_function_body_availability(alias) > AVAIL_OVERWRITABLE)
+                       if (cgraph_for_node_and_aliases(alias, callback, data, include_overwritable))
+                               return true;
+       }
+
+       return false;
+}
+
 #define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
        for ((node) = cgraph_first_function_with_gimple_body(); (node); \
                (node) = cgraph_next_function_with_gimple_body(node))
@@ -399,6 +421,7 @@ typedef union gimple_statement_d gassign;
 typedef union gimple_statement_d gcall;
 typedef union gimple_statement_d gcond;
 typedef union gimple_statement_d gdebug;
+typedef union gimple_statement_d ggoto;
 typedef union gimple_statement_d gphi;
 typedef union gimple_statement_d greturn;
 
@@ -452,6 +475,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
        return stmt;
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return stmt;
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return stmt;
@@ -496,6 +529,14 @@ static inline const greturn *as_a_const_greturn(const_gimple stmt)
 
 typedef struct rtx_def rtx_insn;
 
+static inline const char *get_decl_section_name(const_tree decl)
+{
+       if (DECL_SECTION_NAME(decl) == NULL_TREE)
+               return NULL;
+
+       return TREE_STRING_POINTER(DECL_SECTION_NAME(decl));
+}
+
 static inline void set_decl_section_name(tree node, const char *value)
 {
        if (value)
@@ -511,6 +552,7 @@ typedef struct gimple_statement_base gassign;
 typedef struct gimple_statement_call gcall;
 typedef struct gimple_statement_base gcond;
 typedef struct gimple_statement_base gdebug;
+typedef struct gimple_statement_base ggoto;
 typedef struct gimple_statement_phi gphi;
 typedef struct gimple_statement_base greturn;
 
@@ -564,6 +606,16 @@ static inline const gdebug *as_a_const_gdebug(const_gimple stmt)
        return stmt;
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return stmt;
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return stmt;
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return as_a<gphi>(stmt);
@@ -611,6 +663,11 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
 
 #define INSN_DELETED_P(insn) (insn)->deleted()
 
+static inline const char *get_decl_section_name(const_tree decl)
+{
+       return DECL_SECTION_NAME(decl);
+}
+
 /* symtab/cgraph related */
 #define debug_cgraph_node(node) (node)->debug()
 #define cgraph_get_node(decl) cgraph_node::get(decl)
@@ -619,6 +676,7 @@ inline bool is_a_helper<const gassign *>::test(const_gimple gs)
 #define cgraph_n_nodes symtab->cgraph_count
 #define cgraph_max_uid symtab->cgraph_max_uid
 #define varpool_get_node(decl) varpool_node::get(decl)
+#define dump_varpool_node(file, node) (node)->dump(file)
 
 #define cgraph_create_edge(caller, callee, call_stmt, count, freq, nest) \
        (caller)->create_edge((callee), (call_stmt), (count), (freq))
@@ -674,6 +732,11 @@ static inline cgraph_node_ptr cgraph_alias_target(cgraph_node_ptr node)
        return node->get_alias_target();
 }
 
+static inline bool cgraph_for_node_and_aliases(cgraph_node_ptr node, bool (*callback)(cgraph_node_ptr, void *), void *data, bool include_overwritable)
+{
+       return node->call_for_symbol_thunks_and_aliases(callback, data, include_overwritable);
+}
+
 static inline struct cgraph_node_hook_list *cgraph_add_function_insertion_hook(cgraph_node_hook hook, void *data)
 {
        return symtab->add_cgraph_insertion_hook(hook, data);
@@ -729,6 +792,13 @@ static inline gimple gimple_build_assign_with_ops(enum tree_code subcode, tree l
        return gimple_build_assign(lhs, subcode, op1, op2 PASS_MEM_STAT);
 }
 
+template <>
+template <>
+inline bool is_a_helper<const ggoto *>::test(const_gimple gs)
+{
+       return gs->code == GIMPLE_GOTO;
+}
+
 template <>
 template <>
 inline bool is_a_helper<const greturn *>::test(const_gimple gs)
@@ -766,6 +836,16 @@ static inline const gcall *as_a_const_gcall(const_gimple stmt)
        return as_a<const gcall *>(stmt);
 }
 
+static inline ggoto *as_a_ggoto(gimple stmt)
+{
+       return as_a<ggoto *>(stmt);
+}
+
+static inline const ggoto *as_a_const_ggoto(const_gimple stmt)
+{
+       return as_a<const ggoto *>(stmt);
+}
+
 static inline gphi *as_a_gphi(gimple stmt)
 {
        return as_a<gphi *>(stmt);
@@ -828,4 +908,9 @@ static inline void debug_gimple_stmt(const_gimple s)
 #define debug_gimple_stmt(s) debug_gimple_stmt(CONST_CAST_GIMPLE(s))
 #endif
 
+#if BUILDING_GCC_VERSION >= 7000
+#define get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep, keep_aligning)  \
+       get_inner_reference(exp, pbitsize, pbitpos, poffset, pmode, punsignedp, preversep, pvolatilep)
+#endif
+
 #endif
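
The get_inner_reference() wrapper at the end is the usual compat-shim trick in this header: GCC 7 dropped the trailing keep_aligning parameter, so plugin call sites keep the old nine-argument form and the macro discards the extra argument when building against GCC >= 7. A hypothetical call site, unchanged across versions:

	inner = get_inner_reference(expr, &bitsize, &bitpos, &offset, &mode,
				    &unsignedp, &reversep, &volatilep,
				    true);	/* ignored on GCC >= 7 */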
index 12541126575b1e2416ad48d58f1d01f8e08a5d93..8ff203ad48093f57fccf04bb9d7c9b9e1952603b 100644 (file)
@@ -328,9 +328,9 @@ static enum tree_code get_op(tree *rhs)
                        op = LROTATE_EXPR;
                        /*
                         * This code limits the value of random_const to
-                        * the size of a wide int for the rotation
+                        * the size of a long for the rotation
                         */
-                       random_const &= HOST_BITS_PER_WIDE_INT - 1;
+                       random_const %= TYPE_PRECISION(long_unsigned_type_node);
                        break;
                }