Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author    Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 16 May 2016 20:17:56 +0000 (13:17 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Mon, 16 May 2016 20:17:56 +0000 (13:17 -0700)
Pull locking changes from Ingo Molnar:
 "The main changes in this cycle were:

   - pvqspinlock statistics fixes (Davidlohr Bueso)

   - flip atomic_fetch_or() arguments (Peter Zijlstra; see the sketch below)

   - locktorture simplification (Paul E. McKenney)

   - documentation updates (SeongJae Park, David Howells, Davidlohr
     Bueso, Paul E. McKenney, Peter Zijlstra, Will Deacon)

   - various fixes"
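
The atomic_fetch_or() item is an API-consistency change rather than a behavioural
one. As a minimal sketch (illustrative only, not the actual kernel patch), the
caller-visible effect is that the value operand now comes first and the atomic_t
pointer second, matching atomic_or() and the other atomic read-modify-write
helpers; the function and variable names below are made up for the example:

    #include <linux/atomic.h>

    static atomic_t example_flags = ATOMIC_INIT(0);

    static int example_set_bit0(void)
    {
            /* Old convention (before the flip):
             *     old = atomic_fetch_or(&example_flags, 0x01);
             */

            /* New convention: value first, atomic_t pointer second. */
            int old = atomic_fetch_or(0x01, &example_flags);

            /* atomic_fetch_or() returns the value held before the OR;
             * bit 0 is now set in example_flags.
             */
            return old;
    }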

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/atomics: Flip atomic_fetch_or() arguments
  locking/pvqspinlock: Robustify init_qspinlock_stat()
  locking/pvqspinlock: Avoid double resetting of stats
  locking/locktorture: Simplify the torture_runnable computation
  locking/Documentation: Clarify that ACQUIRE applies to loads, RELEASE applies to stores
  locking/Documentation: State purpose of memory-barriers.txt
  locking/Documentation: Add disclaimer
  locking/Documentation/lockdep: Fix spelling mistakes
  locking/lockdep: Deinline register_lock_class(), save 2328 bytes
  locking/locktorture: Fix NULL pointer dereference for cleanup paths
  locking/locktorture: Fix deboosting NULL pointer dereference
  locking/Documentation: Mention smp_cond_acquire()
  locking/Documentation: Insert white spaces consistently
  locking/Documentation: Fix formatting inconsistencies
  locking/Documentation: Add missed subsection in TOC
  locking/Documentation: Fix missed s/lock/acquire renames
  locking/Documentation: Clarify relationship of barrier() to control dependencies

473 files changed:
.mailmap
Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/Data-Structures.html [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/TreeLevel.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/TreeMapping.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/blkd_task.svg [new file with mode: 0644]
Documentation/RCU/Design/Data-Structures/nxtlist.svg [new file with mode: 0644]
Documentation/RCU/Design/Requirements/2013-08-is-it-dead.png [deleted file]
Documentation/RCU/Design/Requirements/RCUApplicability.svg [deleted file]
Documentation/RCU/Design/Requirements/Requirements.html
Documentation/RCU/Design/Requirements/Requirements.htmlx [deleted file]
Documentation/RCU/Design/htmlqqz.sh [deleted file]
Documentation/RCU/trace.txt
Documentation/RCU/whatisRCU.txt
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/net/cpsw.txt
Documentation/devicetree/bindings/regmap/regmap.txt
Documentation/kernel-parameters.txt
Documentation/networking/altera_tse.txt
Documentation/networking/checksum-offloads.txt
Documentation/networking/ipvlan.txt
Documentation/networking/pktgen.txt
Documentation/networking/vrf.txt
Documentation/networking/xfrm_sync.txt
Documentation/sysctl/kernel.txt
MAINTAINERS
Makefile
arch/arc/Kconfig
arch/arc/include/asm/io.h
arch/arc/include/asm/mmzone.h [new file with mode: 0644]
arch/arc/include/asm/page.h
arch/arc/include/asm/pgtable.h
arch/arc/mm/init.c
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap34xx.dtsi
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-cm-t54.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/qcom-apq8064.dtsi
arch/arm/boot/dts/sama5d2.dtsi
arch/arm/boot/dts/sun8i-q8-common.dtsi
arch/arm/include/asm/domain.h
arch/arm/include/asm/efi.h
arch/arm/kernel/efi.c
arch/arm/kernel/head-nommu.S
arch/arm/kernel/setup.c
arch/arm/kvm/mmu.c
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mach-davinci/common.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-socfpga/headsmp.S
arch/arm/mm/nommu.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/renesas/r8a7795.dtsi
arch/arm64/include/asm/efi.h
arch/arm64/kernel/efi.c
arch/arm64/kernel/image.h
arch/arm64/net/bpf_jit_comp.c
arch/ia64/kernel/efi.c
arch/parisc/kernel/syscall.S
arch/powerpc/include/asm/word-at-a-time.h
arch/sparc/configs/sparc32_defconfig
arch/sparc/configs/sparc64_defconfig
arch/sparc/include/asm/spitfire.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/cherrs.S
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.c
arch/sparc/kernel/fpu_traps.S
arch/sparc/kernel/head_64.S
arch/sparc/kernel/misctrap.S
arch/sparc/kernel/pci.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/spiterrs.S
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/utrap.S
arch/sparc/kernel/vio.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/kernel/winfixup.S
arch/sparc/mm/init_64.c
arch/x86/boot/compressed/eboot.c
arch/x86/boot/compressed/eboot.h
arch/x86/configs/kvm_guest.config
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/events/amd/iommu.c
arch/x86/events/intel/core.c
arch/x86/events/intel/pt.c
arch/x86/events/intel/uncore_snbep.c
arch/x86/events/msr.c
arch/x86/include/asm/efi.h
arch/x86/include/asm/uaccess.h
arch/x86/kernel/apic/x2apic_uv_x.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/reboot.c
arch/x86/kernel/signal.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/sysfb_efi.c
arch/x86/kernel/tsc_msr.c
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/mm/pageattr.c
arch/x86/platform/efi/efi-bgrt.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
arch/x86/platform/efi/quirks.c
block/blk-map.c
crypto/Kconfig
crypto/ahash.c
crypto/testmgr.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/nfit.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci_platform.c
drivers/ata/ahci_seattle.c [new file with mode: 0644]
drivers/ata/libahci.c
drivers/base/power/opp/core.c
drivers/base/property.c
drivers/base/regmap/internal.h
drivers/base/regmap/regmap-mmio.c
drivers/base/regmap/regmap-spmi.c
drivers/clk/imx/clk-imx6q.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/sti-cpufreq.c
drivers/cpuidle/cpuidle-arm.c
drivers/crypto/qat/qat_common/adf_common_drv.h
drivers/crypto/qat/qat_common/adf_ctl_drv.c
drivers/crypto/qat/qat_common/adf_sriov.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/Makefile
drivers/firmware/efi/arm-init.c
drivers/firmware/efi/arm-runtime.c
drivers/firmware/efi/capsule-loader.c [new file with mode: 0644]
drivers/firmware/efi/capsule.c [new file with mode: 0644]
drivers/firmware/efi/efi.c
drivers/firmware/efi/efibc.c [new file with mode: 0644]
drivers/firmware/efi/efivars.c
drivers/firmware/efi/fake_mem.c
drivers/firmware/efi/libstub/Makefile
drivers/firmware/efi/libstub/arm-stub.c
drivers/firmware/efi/libstub/arm32-stub.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/firmware/efi/libstub/gop.c [new file with mode: 0644]
drivers/firmware/efi/memattr.c [new file with mode: 0644]
drivers/firmware/efi/reboot.c
drivers/firmware/efi/runtime-wrappers.c
drivers/firmware/efi/vars.c
drivers/firmware/qemu_fw_cfg.c
drivers/gpio/gpio-rcar.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_audio.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_ddi.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_mst.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/radeon_dp_auxch.c
drivers/gpu/ipu-v3/ipu-common.c
drivers/hid/hid-ids.h
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hv/ring_buffer.c
drivers/iio/adc/at91-sama5d2_adc.c
drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
drivers/iio/magnetometer/ak8975.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/input/misc/max8997_haptic.c
drivers/input/misc/twl6040-vibra.c
drivers/input/mouse/byd.c
drivers/input/touchscreen/atmel_mxt_ts.c
drivers/input/touchscreen/zforce_ts.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid5.c
drivers/media/media-device.c
drivers/media/platform/exynos4-is/media-dev.c
drivers/media/platform/s3c-camif/camif-core.c
drivers/media/v4l2-core/videobuf2-v4l2.c
drivers/misc/mic/vop/vop_vringh.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
drivers/net/ethernet/apm/xgene/xgene_enet_main.h
drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/ezchip/nps_enet.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/pxa168_eth.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/uar.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qlogic/qede/qede_main.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
drivers/net/ethernet/renesas/ravb_main.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw.h
drivers/net/ethernet/ti/davinci_emac.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/geneve.c
drivers/net/macsec.c
drivers/net/macvtap.c
drivers/net/phy/at803x.c
drivers/net/phy/phy.c
drivers/net/usb/lan78xx.c
drivers/net/usb/pegasus.c
drivers/net/usb/smsc75xx.c
drivers/net/usb/smsc95xx.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_phy.c
drivers/net/wireless/intel/iwlwifi/iwl-8000.c
drivers/net/wireless/intel/iwlwifi/iwl-drv.c
drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c
drivers/net/wireless/intel/iwlwifi/mvm/fw.c
drivers/net/wireless/intel/iwlwifi/mvm/tx.c
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
drivers/net/xen-netback/netback.c
drivers/nvdimm/pmem.c
drivers/nvmem/mxs-ocotp.c
drivers/pci/bus.c
drivers/pinctrl/pinctrl-at91-pio4.c
drivers/rapidio/devices/rio_mport_cdev.c
drivers/regulator/axp20x-regulator.c
drivers/regulator/da9063-regulator.c
drivers/regulator/gpio-regulator.c
drivers/regulator/s2mps11.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/qla1280.c
drivers/spi/spi-fsl-dspi.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-ti-qspi.c
drivers/usb/core/port.c
drivers/usb/core/usb.c
drivers/usb/musb/jz4740.c
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/serial/cp210x.c
drivers/video/fbdev/Kconfig
drivers/video/fbdev/efifb.c
drivers/virtio/virtio_ring.c
drivers/xen/balloon.c
drivers/xen/efi.c
drivers/xen/evtchn.c
fs/ecryptfs/file.c
fs/efivarfs/file.c
fs/efivarfs/super.c
fs/fuse/file.c
fs/isofs/rock.c
fs/kernfs/dir.c
fs/kernfs/mount.c
fs/namei.c
fs/ocfs2/acl.c
fs/ocfs2/acl.h
fs/ocfs2/file.c
fs/ocfs2/namei.c
fs/ocfs2/refcounttree.c
fs/ocfs2/xattr.c
fs/ocfs2/xattr.h
fs/open.c
fs/overlayfs/super.c
fs/pnode.c
fs/proc/base.c
fs/splice.c
fs/udf/super.c
fs/udf/udfdecl.h
fs/udf/unicode.c
include/acpi/acpi_bus.h
include/linux/bpf.h
include/linux/compiler-gcc.h
include/linux/dcache.h
include/linux/efi.h
include/linux/hash.h
include/linux/if_ether.h
include/linux/kernfs.h
include/linux/mfd/samsung/s2mps11.h
include/linux/mm.h
include/linux/namei.h
include/linux/net.h
include/linux/netdevice.h
include/linux/of.h
include/linux/page-flags.h
include/linux/proportions.h [deleted file]
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/rio_mport_cdev.h [deleted file]
include/linux/sched.h
include/linux/signal.h
include/linux/swap.h
include/linux/uio.h
include/net/netns/xfrm.h
include/net/udp_tunnel.h
include/net/vxlan.h
include/trace/events/rcu.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/if.h
include/uapi/linux/if_macsec.h
include/uapi/linux/libc-compat.h
include/uapi/linux/rio_mport_cdev.h [new file with mode: 0644]
include/uapi/linux/signal.h
include/uapi/linux/swab.h
include/uapi/linux/tc_act/Kbuild
include/xen/page.h
kernel/bpf/inode.c
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/cgroup.c
kernel/events/core.c
kernel/events/ring_buffer.c
kernel/fork.c
kernel/rcu/Makefile
kernel/rcu/rcuperf.c [new file with mode: 0644]
kernel/rcu/rcutorture.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/rcu/update.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/signal.c
kernel/torture.c
kernel/trace/trace_events.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/asn1_decoder.c
lib/iov_iter.c
lib/proportions.c [deleted file]
lib/stackdepot.c
mm/compaction.c
mm/huge_memory.c
mm/ksm.c
mm/memory.c
mm/page-writeback.c
mm/page_alloc.c
mm/swapfile.c
mm/zsmalloc.c
mm/zswap.c
net/batman-adv/bat_v.c
net/batman-adv/distributed-arp-table.c
net/batman-adv/hard-interface.c
net/batman-adv/originator.c
net/batman-adv/routing.c
net/batman-adv/send.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bridge/br_ioctl.c
net/bridge/br_multicast.c
net/core/dev.c
net/core/flow.c
net/core/rtnetlink.c
net/ipv4/fib_semantics.c
net/ipv4/fou.c
net/ipv4/inet_hashtables.c
net/ipv4/ip_gre.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/tcp_output.c
net/ipv4/udp_offload.c
net/ipv6/icmp.c
net/ipv6/ila/ila_lwt.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/l2tp/l2tp_core.c
net/llc/af_llc.c
net/mac80211/iface.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nfnetlink_acct.c
net/netfilter/xt_IDLETIMER.c
net/openvswitch/conntrack.c
net/rds/tcp.c
net/rds/tcp.h
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_vlan.c
net/sched/sch_netem.c
net/tipc/node.c
net/vmw_vsock/af_vsock.c
net/x25/x25_facilities.c
net/xfrm/xfrm_output.c
samples/bpf/trace_output_kern.c
scripts/mod/file2alias.c
security/integrity/ima/ima_policy.c
sound/pci/hda/hda_sysfs.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks.c
tools/build/Makefile.feature
tools/build/feature/Makefile
tools/build/feature/test-all.c
tools/build/feature/test-dwarf_getlocations.c [new file with mode: 0644]
tools/lib/traceevent/parse-filter.c
tools/net/bpf_jit_disasm.c
tools/perf/arch/x86/util/dwarf-regs.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/config/Makefile
tools/perf/util/dwarf-aux.c
tools/perf/util/event.c
tools/perf/util/evsel.c
tools/perf/util/parse-events.c
tools/perf/util/sort.c
tools/perf/util/thread_map.c
tools/testing/selftests/Makefile
tools/testing/selftests/rcutorture/bin/jitter.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh [new file with mode: 0755]
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/configs/rcu/TREE04
tools/testing/selftests/rcutorture/configs/rcu/TREE04.boot
tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcuperf/TREE [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcuperf/TREE54 [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh [new file with mode: 0644]
tools/testing/selftests/sigaltstack/Makefile [new file with mode: 0644]
tools/testing/selftests/sigaltstack/sas.c [new file with mode: 0644]

index c156a8b4d845cd3fbf29bbcb43213066dafd046b..08b80428f5837001940381417e0b1d0be1125b2c 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -69,6 +69,7 @@ Jean Tourrilhes <jt@hpl.hp.com>
 Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
+John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
 John Stultz <johnstul@us.ibm.com>
 <josh@joshtriplett.org> <josh@freedesktop.org>
 <josh@joshtriplett.org> <josh@kernel.org>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCU.svg
new file mode 100644 (file)
index 0000000..727e270
--- /dev/null
@@ -0,0 +1,474 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:28:20 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="9.1in"
+   height="8.9in"
+   viewBox="-66 -66 10932 10707"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCU.fig">
+  <metadata
+     id="metadata106">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs104">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3864"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="973"
+     inkscape:window-height="1137"
+     id="namedview102"
+     showgrid="false"
+     inkscape:zoom="0.9743589"
+     inkscape:cx="409.50003"
+     inkscape:cy="400.49997"
+     inkscape:window-x="915"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="10800"
+       height="5625"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="3825"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="6525"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line -->
+    <polyline
+       points="3375,6525 3375,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline14" />
+    <!-- Arrowhead on XXXpoint 3375 6525 - 3375 4860-->
+    <!-- Circle -->
+    <circle
+       cx="7425"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle18" />
+    <!-- Circle -->
+    <circle
+       cx="7875"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle20" />
+    <!-- Circle -->
+    <circle
+       cx="8325"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle22" />
+    <!-- Circle -->
+    <circle
+       cx="2025"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle24" />
+    <!-- Circle -->
+    <circle
+       cx="2475"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="2925"
+       cy="6075"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="4725"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Circle -->
+    <circle
+       cx="5175"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle32" />
+    <!-- Circle -->
+    <circle
+       cx="5625"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle34" />
+    <!-- Line: box -->
+    <rect
+       x="2025"
+       y="6525"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect36" />
+    <!-- Line -->
+    <polyline
+       points="2475,3600 3975,2310 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 2475 3600 - 4116 2190-->
+    <!-- Line -->
+    <polyline
+       points="7875,3600 6372,2310 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline42" />
+    <!-- Arrowhead on XXXpoint 7875 3600 - 6231 2190-->
+    <!-- Line -->
+    <polyline
+       points="6975,8775 6975,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 6975 8775 - 6975 4860-->
+    <!-- Line -->
+    <polyline
+       points="1575,8775 1575,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 1575 8775 - 1575 4860-->
+    <!-- Line -->
+    <polyline
+       points="8775,6525 8775,5046 "
+       style="stroke:#00d1d1;stroke-width:44.9934641;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 8775 6525 - 8775 4860-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="9225"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text58">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text60">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1575"
+       y="10350"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text62">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="6975"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text64">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="7425"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text66">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3375"
+       y="8100"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text68">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="9225"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text70">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text72">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6975"
+       y="10350"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text74">CPU 1007</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="6930"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text76">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="7380"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text78">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="8055"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text80">CPU 1023</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text82">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2475"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text84">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2475"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text86">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7875"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text88">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7875"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text90">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5175"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text92">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5175"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text94">rcu_node</text>
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="8775"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="5625"
+       y="8775"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="7380"
+       y="6480"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect100" />
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBH.svg
new file mode 100644 (file)
index 0000000..9bbb194
--- /dev/null
@@ -0,0 +1,499 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:26:09 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="5.7in"
+   height="6.6in"
+   viewBox="-44 -44 6838 7888"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCUBH.fig">
+  <metadata
+     id="metadata110">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs108">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3868"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Mend"
+       style="overflow:visible;">
+      <path
+         id="path3886"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(0.6) rotate(180) translate(0,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="878"
+     inkscape:window-height="1148"
+     id="namedview106"
+     showgrid="false"
+     inkscape:zoom="1.3547758"
+     inkscape:cx="256.5"
+     inkscape:cy="297"
+     inkscape:window-x="45"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle16" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle18" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle20" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle22" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle24" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle30" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle32" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect42" />
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect48" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect50" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect52" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect54" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect56" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect58" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text60">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text62">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text64">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text66">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text68">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text70">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text72">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text74">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text76">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text78">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text80">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text82">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text84">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text86">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text88">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text90">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text92">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline94" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline98" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline102" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreeClassicRCUBHdyntick.svg
new file mode 100644 (file)
index 0000000..21ba782
--- /dev/null
@@ -0,0 +1,695 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:20:02 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="5.7in"
+   height="8.6in"
+   viewBox="-44 -44 6838 10288"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreeClassicRCUBHdyntick.fig">
+  <metadata
+     id="metadata166">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs164">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3924"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3936"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="845"
+     inkscape:window-height="988"
+     id="namedview162"
+     showgrid="false"
+     inkscape:zoom="1.0452196"
+     inkscape:cx="256.5"
+     inkscape:cy="387.00003"
+     inkscape:window-x="356"
+     inkscape:window-y="61"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5688,5912 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline12" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+    <polyline
+       points="5714 6068 5704 5822 5598 6044 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline14" />
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4486,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline16" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+    <polyline
+       points="4514 7418 4506 7172 4396 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline18" />
+    <!-- Line -->
+    <polyline
+       points="1040,9300 1476,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+    <polyline
+       points="1504 7418 1496 7172 1386 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline22" />
+    <!-- Line -->
+    <polyline
+       points="2240,8100 2676,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+    <polyline
+       points="2704 6218 2696 5972 2586 6194 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline26" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.0045575;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline48" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,8100 2250,6364 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+    <!-- Line -->
+    <polyline
+       points="1050,9300 1050,7564 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4050,7564 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5250,6364 "
+       style="stroke:#00ff00;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline64" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle80" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect106" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text108">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text110">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text112">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text114">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text116">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text118">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text120">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text122">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text124">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text126">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text128">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text130">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text132">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text134">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text138">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text152">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text156">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00455750000000066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline158" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntick.svg
new file mode 100644 (file)
index 0000000..15adcac
--- /dev/null
@@ -0,0 +1,741 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:32:59 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="6.1in"
+   height="8.9in"
+   viewBox="-44 -44 7288 10738"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreePreemptRCUBHdyntick.fig">
+  <metadata
+     id="metadata182">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs180">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3940"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="874"
+     inkscape:window-height="1148"
+     id="namedview178"
+     showgrid="false"
+     inkscape:zoom="1.2097379"
+     inkscape:cx="274.5"
+     inkscape:cy="400.49997"
+     inkscape:window-x="946"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1200"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect16" />
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5688,6362 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
+    <polyline
+       points="5714 6518 5704 6272 5598 6494 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline20" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4486,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
+    <polyline
+       points="4514 7868 4506 7622 4396 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline24" />
+    <!-- Line -->
+    <polyline
+       points="1040,9750 1476,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline26" />
+    <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
+    <polyline
+       points="1504 7868 1496 7622 1386 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline28" />
+    <!-- Line -->
+    <polyline
+       points="2240,8550 2676,6512 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline30" />
+    <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
+    <polyline
+       points="2704 6668 2696 6422 2586 6644 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline32" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 5682,6360 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 5736 6246-->
+    <polyline
+       points="5672 6518 5722 6276 5562 6466 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline36" />
+    <!-- Line -->
+    <polyline
+       points="1010,9750 2642,6360 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 1010 9750 - 2696 6246-->
+    <polyline
+       points="2632 6518 2682 6276 2522 6466 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline40" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="900"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect42" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1500"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect44" />
+    <!-- Line -->
+    <polyline
+       points="1350,3900 2350,3040 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
+    <!-- Line -->
+    <polyline
+       points="4950,3900 3948,3040 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
+    <!-- Line -->
+    <polyline
+       points="4050,7050 4050,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
+    <!-- Line -->
+    <polyline
+       points="1050,7050 1050,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline58" />
+    <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,5850 2250,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline62" />
+    <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,8550 2250,6814 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline66" />
+    <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
+    <!-- Line -->
+    <polyline
+       points="1050,9750 1050,8014 "
+       style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline70" />
+    <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4050,8014 "
+       style="stroke:#00ff00;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline74" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5250,6814 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline78" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle86" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle88" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle90" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle92" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle94" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle96" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle98" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect106" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect108" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="2100"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect110" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect112" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect114" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect116" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect118" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect120" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text122">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text124">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text126">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text128">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text130">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text132">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text134">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text138">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text152">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text160">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6900"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text170">rcu_preempt</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="1200"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text172">rcu_sched</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5850 5250,4864 "
+       style="stroke:#00d1d1;stroke-width:30.00205472;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline174" />
+    <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg b/Documentation/RCU/Design/Data-Structures/BigTreePreemptRCUBHdyntickCB.svg
new file mode 100644 (file)
index 0000000..bbc3801
--- /dev/null
@@ -0,0 +1,858 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:29:48 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="7.4in"
+   height="9.9in"
+   viewBox="-44 -44 8938 11938"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="BigTreePreemptRCUBHdyntickCB.svg">
+  <metadata
+     id="metadata212">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs210">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3970"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="881"
+     inkscape:window-height="1128"
+     id="namedview208"
+     showgrid="false"
+     inkscape:zoom="1.0195195"
+     inkscape:cx="333"
+     inkscape:cy="445.49997"
+     inkscape:window-x="936"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="1200"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect16" />
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5688,6362 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5710 6240-->
+    <polyline
+       points="5714 6518 5704 6272 5598 6494 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline20" />
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4486,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4512 7590-->
+    <polyline
+       points="4514 7868 4506 7622 4396 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline24" />
+    <!-- Line -->
+    <polyline
+       points="1040,9750 1476,7712 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline26" />
+    <!-- Arrowhead on XXXpoint 1040 9750 - 1502 7590-->
+    <polyline
+       points="1504 7868 1496 7622 1386 7844 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline28" />
+    <!-- Line -->
+    <polyline
+       points="2240,8550 2676,6512 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline30" />
+    <!-- Arrowhead on XXXpoint 2240 8550 - 2702 6390-->
+    <polyline
+       points="2704 6668 2696 6422 2586 6644 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline32" />
+    <!-- Line -->
+    <polyline
+       points="4050,9600 5692,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 4050 9600 - 5744 5948-->
+    <polyline
+       points="5682 6220 5730 5978 5574 6170 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline36" />
+    <!-- Line -->
+    <polyline
+       points="1086,9600 2728,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 1086 9600 - 2780 5948-->
+    <polyline
+       points="2718 6220 2766 5978 2610 6170 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline40" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="900"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect42" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1500"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect44" />
+    <!-- Line -->
+    <polyline
+       points="1350,3900 2350,3040 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 1350 3900 - 2444 2960-->
+    <!-- Line -->
+    <polyline
+       points="4950,3900 3948,3040 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 4950 3900 - 3854 2960-->
+    <!-- Line -->
+    <polyline
+       points="4050,7050 4050,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 4050 7050 - 4050 4740-->
+    <!-- Line -->
+    <polyline
+       points="1050,7050 1050,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline58" />
+    <!-- Arrowhead on XXXpoint 1050 7050 - 1050 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,5850 2250,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline62" />
+    <!-- Arrowhead on XXXpoint 2250 5850 - 2250 4740-->
+    <!-- Line -->
+    <polyline
+       points="2250,8550 2250,6814 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline66" />
+    <!-- Arrowhead on XXXpoint 2250 8550 - 2250 6690-->
+    <!-- Line -->
+    <polyline
+       points="1050,9750 1050,8014 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline70" />
+    <!-- Arrowhead on XXXpoint 1050 9750 - 1050 7890-->
+    <!-- Line -->
+    <polyline
+       points="4050,9750 4050,8014 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline74" />
+    <!-- Arrowhead on XXXpoint 4050 9750 - 4050 7890-->
+    <!-- Line -->
+    <polyline
+       points="5250,8550 5250,6814 "
+       style="stroke:#00ff00;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline78" />
+    <!-- Arrowhead on XXXpoint 5250 8550 - 5250 6690-->
+    <!-- Line -->
+    <polyline
+       points="6000,6300 8048,7910 "
+       style="stroke:#87cfff;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline82" />
+    <!-- Arrowhead on XXXpoint 6000 6300 - 8146 7986-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle86" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle88" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="4350"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle90" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle92" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle94" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle96" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle98" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle100" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5550"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle102" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="7950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect104" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="9450"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect106" />
+    <!-- Line -->
+    <polyline
+       points="8100,8850 8100,9384 "
+       style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 8100 8850 - 8100 9510-->
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="10950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect112" />
+    <!-- Line -->
+    <polyline
+       points="8100,10350 8100,10884 "
+       style="stroke:#000000;stroke-width:30;stroke-linejoin:miter;stroke-linecap:butt;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 8100 10350 - 8100 11010-->
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect118" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect120" />
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3900"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect122" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect124" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="7050"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect126" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="2100"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect128" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect130" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect132" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9750"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect134" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8550"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect136" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5850"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect138" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="8250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="8550"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="9750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="11250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8100"
+       y="11550"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_head</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="1200"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text152">rcu_sched</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text154">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text160">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text170">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text172">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text174">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text176">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text178">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text180">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7650"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text182">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text184">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text186">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text188">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="10350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text192">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text194">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text196">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8850"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text198">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="9150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text200">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6900"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text202">rcu_preempt</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5850 5250,4864 "
+       style="stroke:#00d1d1;stroke-width:29.99463964;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline204" />
+    <!-- Arrowhead on XXXpoint 5250 5850 - 5250 4740-->
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
new file mode 100644 (file)
index 0000000..7eb47ac
--- /dev/null
@@ -0,0 +1,1333 @@
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
+        "http://www.w3.org/TR/html4/loose.dtd">
+        <html>
+        <head><title>A Tour Through TREE_RCU's Data Structures [LWN.net]</title>
+        <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=iso-8859-1"></head>
+        <body>
+
+           <p>January 27, 2016</p>
+           <p>This article was contributed by Paul E.&nbsp;McKenney</p>
+
+<h3>Introduction</h3>
+
+This document describes RCU's major data structures and their relationship
+to each other.
+
+<ol>
+<li>   <a href="#Data-Structure Relationships">
+       Data-Structure Relationships</a>
+<li>   <a href="#The rcu_state Structure">
+       The <tt>rcu_state</tt> Structure</a>
+<li>   <a href="#The rcu_node Structure">
+       The <tt>rcu_node</tt> Structure</a>
+<li>   <a href="#The rcu_data Structure">
+       The <tt>rcu_data</tt> Structure</a>
+<li>   <a href="#The rcu_dynticks Structure">
+       The <tt>rcu_dynticks</tt> Structure</a>
+<li>   <a href="#The rcu_head Structure">
+       The <tt>rcu_head</tt> Structure</a>
+<li>   <a href="#RCU-Specific Fields in the task_struct Structure">
+       RCU-Specific Fields in the <tt>task_struct</tt> Structure</a>
+<li>   <a href="#Accessor Functions">
+       Accessor Functions</a>
+</ol>
+
+Answers to the Quick Quizzes appear immediately following each quiz,
+hidden as white-on-white text; select the answer text to reveal it.
+
+<h3><a name="Data-Structure Relationships">Data-Structure Relationships</a></h3>
+
+<p>RCU is for all intents and purposes a large state machine, and its
+data structures maintain the state in such a way as to allow RCU readers
+to execute extremely quickly, while also processing the RCU grace periods
+requested by updaters in an efficient and extremely scalable fashion.
+The efficiency and scalability of RCU updaters is provided primarily
+by a combining tree, as shown below:
+
+</p><p><img src="BigTreeClassicRCU.svg" alt="BigTreeClassicRCU.svg" width="30%">
+
+</p><p>This diagram shows an enclosing <tt>rcu_state</tt> structure
+containing a tree of <tt>rcu_node</tt> structures.
+Each leaf node of the <tt>rcu_node</tt> tree has up to 16
+<tt>rcu_data</tt> structures associated with it, so that there
+are <tt>NR_CPUS</tt> <tt>rcu_data</tt> structures in all,
+one for each possible CPU.
+This structure is adjusted at boot time, if needed, to handle the
+common case where <tt>nr_cpu_ids</tt> is much less than
+<tt>NR_CPUS</tt>.
+For example, a number of Linux distributions set <tt>NR_CPUS=4096</tt>,
+which results in a three-level <tt>rcu_node</tt> tree.
+If the actual hardware has only 16 CPUs, RCU will adjust itself
+at boot time, resulting in an <tt>rcu_node</tt> tree with only a single node.
+
+</p><p>The purpose of this combining tree is to allow per-CPU events
+such as quiescent states, dyntick-idle transitions,
+and CPU hotplug operations to be processed efficiently
+and scalably.
+Quiescent states are recorded by the per-CPU <tt>rcu_data</tt> structures,
+and other events are recorded by the leaf-level <tt>rcu_node</tt>
+structures.
+All of these events are combined at each level of the tree until finally
+grace periods are completed at the tree's root <tt>rcu_node</tt>
+structure.
+A grace period can be completed at the root once every CPU
+(or, in the case of <tt>CONFIG_PREEMPT_RCU</tt>, task)
+has passed through a quiescent state.
+Once a grace period has completed, record of that fact is propagated
+back down the tree.
+
+</p><p>As can be seen from the diagram, on a 64-bit system
+a two-level tree with 64 leaves can accommodate 1,024 CPUs, with a fanout
+of 64 at the root and a fanout of 16 at the leaves.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why isn't the fanout at the leaves also 64?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Because there are more types of events that affect the leaf-level
+       <tt>rcu_node</tt> structures than further up the tree.
+       Therefore, if the leaf <tt>rcu_node</tt> structures have fanout of
+       64, the contention on these structures' <tt>-&gt;lock</tt> fields
+       becomes excessive.
+       Experimentation on a wide variety of systems has shown that a fanout
+       of 16 works well for the leaves of the <tt>rcu_node</tt> tree.
+       </font>
+
+       <p><font color="ffffff">Of course, further experience with
+       systems having hundreds or thousands of CPUs may demonstrate
+       that the fanout for the non-leaf <tt>rcu_node</tt> structures
+       must also be reduced.
+       Such reduction can be easily carried out when and if it proves
+       necessary.
+       In the meantime, if you are using such a system and running into
+       contention problems on the non-leaf <tt>rcu_node</tt> structures,
+       you may use the <tt>CONFIG_RCU_FANOUT</tt> kernel configuration
+       parameter to reduce the non-leaf fanout as needed.
+       </font>
+
+       <p><font color="ffffff">Kernels built for systems with
+       strong NUMA characteristics might also need to adjust
+       <tt>CONFIG_RCU_FANOUT</tt> so that the domains of the
+       <tt>rcu_node</tt> structures align with hardware boundaries.
+       However, there has thus far been no need for this.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
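+
+<p>In other words, the capacity of the tree shown above is simply the
+product of the per-level fanouts:
+
+<pre>
+  64 leaf rcu_node structures  *  16 rcu_data structures per leaf  =  1,024 CPUs
+</pre>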
+
+<p>If your system has more than 1,024 CPUs (or more than 512 CPUs on
+a 32-bit system), then RCU will automatically add more levels to the
+tree.
+For example, if you are crazy enough to build a 64-bit system with 65,536
+CPUs, RCU would configure the <tt>rcu_node</tt> tree as follows:
+
+</p><p><img src="HugeTreeClassicRCU.svg" alt="HugeTreeClassicRCU.svg" width="50%">
+
+</p><p>RCU currently permits up to a four-level tree, which on a 64-bit system
+accommodates up to 4,194,304 CPUs, though only a mere 524,288 CPUs for
+32-bit systems.
+On the other hand, you can set <tt>CONFIG_RCU_FANOUT</tt> to be
+as small as 2 if you wish, which would permit only 16 CPUs, which
+is useful for testing.
+
+</p><p>This multi-level combining tree allows us to get most of the
+performance and scalability
+benefits of partitioning, even though RCU grace-period detection is
+inherently a global operation.
+The trick here is that only the last CPU to report a quiescent state
+into a given <tt>rcu_node</tt> structure need advance to the <tt>rcu_node</tt>
+structure at the next level up the tree.
+This means that at the leaf-level <tt>rcu_node</tt> structure, only
+one access out of sixteen will progress up the tree.
+For the internal <tt>rcu_node</tt> structures, the situation is even
+more extreme:  Only one access out of sixty-four will progress up
+the tree.
+Because the vast majority of the CPUs do not progress up the tree,
+the lock contention remains roughly constant up the tree.
+No matter how many CPUs there are in the system, at most 64 quiescent-state
+reports per grace period will progress all the way to the root
+<tt>rcu_node</tt> structure, thus ensuring that the lock contention
+on that root <tt>rcu_node</tt> structure remains acceptably low.
+
+</p><p>In effect, the combining tree acts like a big shock absorber,
+keeping lock contention under control at all tree levels regardless
+of the level of loading on the system.
+
+</p><p>The Linux kernel actually supports multiple flavors of RCU
+running concurrently, so RCU builds separate data structures for each
+flavor.
+For example, for <tt>CONFIG_TREE_RCU=y</tt> kernels, RCU provides
+rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBH.svg" alt="BigTreeClassicRCUBH.svg" width="33%">
+
+</p><p>Energy efficiency is increasingly important, and for that
+reason the Linux kernel provides <tt>CONFIG_NO_HZ_IDLE</tt>, which
+turns off the scheduling-clock interrupts on idle CPUs, which in
+turn allows those CPUs to attain deeper sleep states and to consume
+less energy.
+CPUs whose scheduling-clock interrupts have been turned off are
+said to be in <i>dyntick-idle mode</i>.
+RCU must handle dyntick-idle CPUs specially
+because RCU would otherwise wake up each CPU on every grace period,
+which would defeat the whole purpose of <tt>CONFIG_NO_HZ_IDLE</tt>.
+RCU uses the <tt>rcu_dynticks</tt> structure to track
+which CPUs are in dyntick idle mode, as shown below:
+
+</p><p><img src="BigTreeClassicRCUBHdyntick.svg" alt="BigTreeClassicRCUBHdyntick.svg" width="33%">
+
+</p><p>However, if a CPU is in dyntick-idle mode, it is in that mode
+for all flavors of RCU.
+Therefore, a single <tt>rcu_dynticks</tt> structure is allocated per
+CPU, and all of a given CPU's <tt>rcu_data</tt> structures share
+that <tt>rcu_dynticks</tt>, as shown in the figure.
+
+</p><p>Kernels built with <tt>CONFIG_PREEMPT_RCU</tt> support
+rcu_preempt in addition to rcu_sched and rcu_bh, as shown below:
+
+</p><p><img src="BigTreePreemptRCUBHdyntick.svg" alt="BigTreePreemptRCUBHdyntick.svg" width="35%">
+
+</p><p>RCU updaters wait for normal grace periods by registering
+RCU callbacks, either directly via <tt>call_rcu()</tt> and
+friends (namely <tt>call_rcu_bh()</tt> and <tt>call_rcu_sched()</tt>,
+there being a separate interface per flavor of RCU)
+or indirectly via <tt>synchronize_rcu()</tt> and friends.
+RCU callbacks are represented by <tt>rcu_head</tt> structures,
+which are queued on <tt>rcu_data</tt> structures while they are
+waiting for a grace period to elapse, as shown in the following figure:
+
+</p><p><img src="BigTreePreemptRCUBHdyntickCB.svg" alt="BigTreePreemptRCUBHdyntickCB.svg" width="40%">
+
+</p><p>This figure shows how <tt>TREE_RCU</tt>'s and
+<tt>PREEMPT_RCU</tt>'s major data structures are related.
+Lesser data structures will be introduced with the algorithms that
+make use of them.
+
+</p><p>Note that each of the data structures in the above figure has
+its own synchronization:
+
+<p><ol>
+<li>   Each <tt>rcu_state</tt> structure has a lock and a mutex,
+       and some fields are protected by the corresponding root
+       <tt>rcu_node</tt> structure's lock.
+<li>   Each <tt>rcu_node</tt> structure has a spinlock.
+<li>   The fields in <tt>rcu_data</tt> are private to the corresponding
+       CPU, although a few can be read and written by other CPUs.
+<li>   Similarly, the fields in <tt>rcu_dynticks</tt> are private
+       to the corresponding CPU, although a few can be read by
+       other CPUs.
+</ol>
+
+<p>It is important to note that different data structures can have
+very different ideas about the state of RCU at any given time.
+For but one example, awareness of the start or end of a given RCU
+grace period propagates slowly through the data structures.
+This slow propagation is absolutely necessary for RCU to have good
+read-side performance.
+If this balkanized implementation seems foreign to you, one useful
+trick is to consider each instance of these data structures to be
+a different person, each having the usual slightly different
+view of reality.
+
+</p><p>The general role of each of these data structures is as
+follows:
+
+</p><ol>
+<li>   <tt>rcu_state</tt>:
+       This structure forms the interconnection between the
+       <tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+tracks grace periods, serves as a short-term repository
+       for callbacks orphaned by CPU-hotplug events,
+       maintains <tt>rcu_barrier()</tt> state,
+       tracks expedited grace-period state,
+       and maintains state used to force quiescent states when
+grace periods extend too long.
+<li>   <tt>rcu_node</tt>: This structure forms the combining
+       tree that propagates quiescent-state
+       information from the leaves to the root, and also propagates
+       grace-period information from the root to the leaves.
+       It provides local copies of the grace-period state in order
+       to allow this information to be accessed in a synchronized
+       manner without suffering the scalability limitations that
+       would otherwise be imposed by global locking.
+       In <tt>CONFIG_PREEMPT_RCU</tt> kernels, it manages the lists
+       of tasks that have blocked while in their current
+       RCU read-side critical section.
+       In <tt>CONFIG_PREEMPT_RCU</tt> with
+       <tt>CONFIG_RCU_BOOST</tt>, it manages the
+       per-<tt>rcu_node</tt> priority-boosting
+       kernel threads (kthreads) and state.
+       Finally, it records CPU-hotplug state in order to determine
+       which CPUs should be ignored during a given grace period.
+<li>   <tt>rcu_data</tt>: This per-CPU structure is the
+       focus of quiescent-state detection and RCU callback queuing.
+       It also tracks its relationship to the corresponding leaf
+       <tt>rcu_node</tt> structure to allow more-efficient
+       propagation of quiescent states up the <tt>rcu_node</tt>
+       combining tree.
+       Like the <tt>rcu_node</tt> structure, it provides a local
+       copy of the grace-period information to allow synchronized
+       access to this information from the corresponding CPU at
+       essentially no cost.
+       Finally, this structure records past dyntick-idle state
+       for the corresponding CPU and also tracks statistics.
+<li>   <tt>rcu_dynticks</tt>:
+       This per-CPU structure tracks the current dyntick-idle
+       state for the corresponding CPU.
+       Unlike the other three structures, the <tt>rcu_dynticks</tt>
+       structure is not replicated per RCU flavor.
+<li>   <tt>rcu_head</tt>:
+       This structure represents RCU callbacks, and is the
+       only structure allocated and managed by RCU users.
+       The <tt>rcu_head</tt> structure is normally embedded
+       within the RCU-protected data structure.
+</ol>
+
+<p>If all you wanted from this article was a general notion of how
+RCU's data structures are related, you are done.
+Otherwise, each of the following sections gives more details on
+the <tt>rcu_state</tt>, <tt>rcu_node</tt>, <tt>rcu_data</tt>,
+and <tt>rcu_dynticks</tt> data structures.
+
+<h3><a name="The rcu_state Structure">
+The <tt>rcu_state</tt> Structure</a></h3>
+
+<p>The <tt>rcu_state</tt> structure is the base structure that
+represents a flavor of RCU.
+This structure forms the interconnection between the
+<tt>rcu_node</tt> and <tt>rcu_data</tt> structures,
+tracks grace periods, contains the lock used to
+synchronize with CPU-hotplug events,
+and maintains state used to force quiescent states when
+grace periods extend too long.
+
+</p><p>A few of the <tt>rcu_state</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+The more specialized fields are covered in the discussion of their
+use.
+
+<h5>Relationship to rcu_node and rcu_data Structures</h5>
+
+This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node node[NUM_RCU_NODES];
+  2   struct rcu_node *level[NUM_RCU_LVLS + 1];
+  3   struct rcu_data __percpu *rda;
+</pre>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Wait a minute!
+       You said that the <tt>rcu_node</tt> structures formed a tree,
+       but they are declared as a flat array!
+       What gives?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       The tree is laid out in the array.
+       The first node in the array is the head, the next set of nodes in the
+       array are children of the head node, and so on until the last set of
+       nodes in the array are the leaves.
+       </font>
+
+       <p><font color="ffffff">See the following diagrams to see how
+       this works.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>The <tt>rcu_node</tt> tree is embedded into the
+<tt>-&gt;node[]</tt> array as shown in the following figure:
+
+</p><p><img src="TreeMapping.svg" alt="TreeMapping.svg" width="40%">
+
+</p><p>One interesting consequence of this mapping is that a
+breadth-first traversal of the tree is implemented as a simple
+linear scan of the array, which is in fact what the
+<tt>rcu_for_each_node_breadth_first()</tt> macro does.
+This macro is used at the beginnings and ends of grace periods.
+
+</p><p>Each entry of the <tt>-&gt;level</tt> array references
+the first <tt>rcu_node</tt> structure on the corresponding level
+of the tree, for example, as shown below:
+
+</p><p><img src="TreeMappingLevel.svg" alt="TreeMappingLevel.svg" width="40%">
+
+</p><p>The zero<sup>th</sup> element of the array references the root
+<tt>rcu_node</tt> structure, the first element references the
+first child of the root <tt>rcu_node</tt>, and finally the second
+element references the first leaf <tt>rcu_node</tt> structure.
+
+</p><p>For whatever it is worth, if you draw the tree to be tree-shaped
+rather than array-shaped, it is easy to draw a planar representation:
+
+</p><p><img src="TreeLevel.svg" alt="TreeLevel.svg" width="60%">
+
+</p><p>Finally, the <tt>-&gt;rda</tt> field references a per-CPU
+pointer to the corresponding CPU's <tt>rcu_data</tt> structure.
+
+</p><p>All of these fields are constant once initialization is complete,
+and therefore need no protection.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>RCU grace periods are numbered, and
+the <tt>-&gt;gpnum</tt> field contains the number of the grace
+period that started most recently.
+The <tt>-&gt;completed</tt> field contains the number of the
+grace period that completed most recently.
+If the two fields are equal, the RCU grace period that most recently
+started has already completed, and therefore the corresponding
+flavor of RCU is idle.
+If <tt>-&gt;gpnum</tt> is one greater than <tt>-&gt;completed</tt>,
+then <tt>-&gt;gpnum</tt> gives the number of the current RCU
+grace period, which has not yet completed.
+Any other combination of values indicates that something is broken.
+These two fields are protected by the root <tt>rcu_node</tt>'s
+<tt>-&gt;lock</tt> field.
+
+</p><p>There are <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt> fields
+in the <tt>rcu_node</tt> and <tt>rcu_data</tt> structures
+as well.
+The fields in the <tt>rcu_state</tt> structure represent the
+most current values, and those of the other structures are compared
+in order to detect the start of a new grace period in a distributed
+fashion.
+The values flow from <tt>rcu_state</tt> to <tt>rcu_node</tt>
+(down the tree from the root to the leaves) to <tt>rcu_data</tt>.
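+
+</p><p>In code, the question of whether a given flavor of RCU is idle
+therefore reduces to a comparison of these two counters.
+The following is a minimal sketch modeled on the kernel's
+<tt>rcu_gp_in_progress()</tt> helper; the actual helper may differ in
+minor details from one kernel version to the next:
+
+<pre>
+/*
+ * Sketch: a grace period is in progress for this flavor of RCU if its
+ * most recently started grace period has not yet completed.
+ */
+static bool rcu_gp_in_progress_sketch(struct rcu_state *rsp)
+{
+        return READ_ONCE(rsp-&gt;completed) != READ_ONCE(rsp-&gt;gpnum);
+}
+</pre>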
+
+<h5>Miscellaneous</h5>
+
+<p>This portion of the <tt>rcu_state</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gp_max;
+  2   char abbr;
+  3   char *name;
+</pre>
+
+<p>The <tt>-&gt;gp_max</tt> field tracks the duration of the longest
+grace period in jiffies.
+It is protected by the root <tt>rcu_node</tt>'s <tt>-&gt;lock</tt>.
+
+<p>The <tt>-&gt;name</tt> field points to the name of the RCU flavor
+(for example, &ldquo;rcu_sched&rdquo;), and is constant.
+The <tt>-&gt;abbr</tt> field contains a one-character abbreviation,
+for example, &ldquo;s&rdquo; for RCU-sched.
+
+<h3><a name="The rcu_node Structure">
+The <tt>rcu_node</tt> Structure</a></h3>
+
+<p>The <tt>rcu_node</tt> structures form the combining
+tree that propagates quiescent-state
+information from the leaves to the root and also that propagates
+grace-period information from the root down to the leaves.
+They provide local copies of the grace-period state in order
+to allow this information to be accessed in a synchronized
+manner without suffering the scalability limitations that
+would otherwise be imposed by global locking.
+In <tt>CONFIG_PREEMPT_RCU</tt> kernels, they manage the lists
+of tasks that have blocked while in their current
+RCU read-side critical section.
+In <tt>CONFIG_PREEMPT_RCU</tt> with
+<tt>CONFIG_RCU_BOOST</tt>, they manage the
+per-<tt>rcu_node</tt> priority-boosting
+kernel threads (kthreads) and state.
+Finally, they record CPU-hotplug state in order to determine
+which CPUs should be ignored during a given grace period.
+
+</p><p>The <tt>rcu_node</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Combining Tree</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   struct rcu_node *parent;
+  2   u8 level;
+  3   u8 grpnum;
+  4   unsigned long grpmask;
+  5   int grplo;
+  6   int grphi;
+</pre>
+
+<p>The <tt>-&gt;parent</tt> pointer references the <tt>rcu_node</tt>
+one level up in the tree, and is <tt>NULL</tt> for the root
+<tt>rcu_node</tt>.
+The RCU implementation makes heavy use of this field to push quiescent
+states up the tree.
+The <tt>-&gt;level</tt> field gives the level in the tree, with
+the root being at level zero, its children at level one, and so on.
+The <tt>-&gt;grpnum</tt> field gives this node's position within
+the children of its parent, so this number can range between 0 and 31
+on 32-bit systems and between 0 and 63 on 64-bit systems.
+The <tt>-&gt;level</tt> and <tt>-&gt;grpnum</tt> fields are
+used only during initialization and for tracing.
+The <tt>-&gt;grpmask</tt> field is the bitmask counterpart of
+<tt>-&gt;grpnum</tt>, and therefore always has exactly one bit set.
+This mask is used to clear the bit corresponding to this <tt>rcu_node</tt>
+structure in its parent's bitmasks, which are described later.
+Finally, the <tt>-&gt;grplo</tt> and <tt>-&gt;grphi</tt> fields
+contain the lowest and highest numbered CPU served by this
+<tt>rcu_node</tt> structure, respectively.
+
+</p><p>All of these fields are constant, and thus do not require any
+synchronization.
+
+<h5>Synchronization</h5>
+
+<p>This field of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   raw_spinlock_t lock;
+</pre>
+
+<p>This field is used to protect the remaining fields in this structure,
+unless otherwise stated.
+That said, all of the fields in this structure can be accessed without
+locking for tracing purposes.
+Yes, this can result in confusing traces, but better some tracing confusion
+than to be heisenbugged out of existence.
+
+<h5>Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_node</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long gpnum;
+  2   unsigned long completed;
+</pre>
+
+<p>These fields are the counterparts of the fields of the same name in
+the <tt>rcu_state</tt> structure.
+They each may lag up to one behind their <tt>rcu_state</tt>
+counterparts.
+If a given <tt>rcu_node</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_node</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> structure,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_node</tt> believes
+is still being waited for.
+
+</p><p>The <tt>-&gt;gpnum</tt> field of each <tt>rcu_node</tt>
+structure is updated at the beginning
+of each grace period, and the <tt>-&gt;completed</tt> fields are
+updated at the end of each grace period.
+
+<h5>Quiescent-State Tracking</h5>
+
+<p>These fields manage the propagation of quiescent states up the
+combining tree.
+
+</p><p>This portion of the <tt>rcu_node</tt> structure has fields
+as follows:
+
+<pre>
+  1   unsigned long qsmask;
+  2   unsigned long expmask;
+  3   unsigned long qsmaskinit;
+  4   unsigned long expmaskinit;
+</pre>
+
+<p>The <tt>-&gt;qsmask</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current normal grace period.
+Such children will have a value of 1 in their corresponding bit.
+Note that the leaf <tt>rcu_node</tt> structures should be
+thought of as having <tt>rcu_data</tt> structures as their
+children.
+Similarly, the <tt>-&gt;expmask</tt> field tracks which
+of this <tt>rcu_node</tt> structure's children still need to report
+quiescent states for the current expedited grace period.
+An expedited grace period has
+the same conceptual properties as a normal grace period, but the
+expedited implementation accepts extreme CPU overhead to obtain
+much lower grace-period latency, for example, consuming a few
+tens of microseconds worth of CPU time to reduce grace-period
+duration from milliseconds to tens of microseconds.
+The <tt>-&gt;qsmaskinit</tt> field tracks which of this
+<tt>rcu_node</tt> structure's children cover at least
+one online CPU.
+This mask is used to initialize <tt>-&gt;qsmask</tt>,
+and <tt>-&gt;expmaskinit</tt> is used to initialize
+<tt>-&gt;expmask</tt>, at the beginning of the
+normal and expedited grace periods, respectively.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why are these bitmasks protected by locking?
+       Come on, haven't you heard of atomic instructions???
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Lockless grace-period computation!  Such a tantalizing possibility!
+       </font>
+
+       <p><font color="ffffff">But consider the following sequence of events:
+       </font>
+
+       <ol>
+       <li>    <font color="ffffff">CPU&nbsp;0 has been in dyntick-idle
+               mode for quite some time.
+               When it wakes up, it notices that the current RCU
+               grace period needs it to report in, so it sets a
+               flag where the scheduling clock interrupt will find it.
+               </font><p>
+       <li>    <font color="ffffff">Meanwhile, CPU&nbsp;1 is running
+               <tt>force_quiescent_state()</tt>,
+               and notices that CPU&nbsp;0 has been in dyntick idle mode,
+               which qualifies as an extended quiescent state.
+               </font><p>
+       <li>    <font color="ffffff">CPU&nbsp;0's scheduling clock
+               interrupt fires in the
+               middle of an RCU read-side critical section, and notices
+               that the RCU core needs something, so commences RCU softirq
+               processing.
+               </font>
+               <p>
+       <li>    <font color="ffffff">CPU&nbsp;0's softirq handler
+               executes and is just about ready
+               to report its quiescent state up the <tt>rcu_node</tt>
+               tree.
+               </font><p>
+       <li>    <font color="ffffff">But CPU&nbsp;1 beats it to the punch,
+               completing the current
+               grace period and starting a new one.
+               </font><p>
+       <li>    <font color="ffffff">CPU&nbsp;0 now reports its quiescent
+               state for the wrong
+               grace period.
+               That grace period might now end before the RCU read-side
+               critical section.
+               If that happens, disaster will ensue.
+               </font>
+       </ol>
+
+       <p><font color="ffffff">So the locking is absolutely required in
+       order to coordinate
+       clearing of the bits with the grace-period numbers in
+       <tt>-&gt;gpnum</tt> and <tt>-&gt;completed</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
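+
+<p>To make the roles of <tt>-&gt;qsmask</tt>, <tt>-&gt;grpmask</tt>, and
+<tt>-&gt;lock</tt> concrete, the following sketch shows the general shape
+of quiescent-state propagation up the combining tree.
+This is a simplified illustration of the idea behind the kernel's
+<tt>rcu_report_qs_rnp()</tt> function, not its actual code, which must
+also check grace-period numbers, disable interrupts, and handle
+blocked readers:
+
+<pre>
+/*
+ * Simplified sketch: clear this child's bit in each ancestor's
+ * -&gt;qsmask, stopping as soon as some sibling has not yet reported
+ * or the root has been reached.
+ */
+static void report_qs_sketch(struct rcu_node *rnp, unsigned long mask)
+{
+        for (;;) {
+                raw_spin_lock(&amp;rnp-&gt;lock);
+                rnp-&gt;qsmask &amp;= ~mask;    /* This child has now reported. */
+                if (rnp-&gt;qsmask || !rnp-&gt;parent) {
+                        /* Siblings still pending, or reached the root. */
+                        raw_spin_unlock(&amp;rnp-&gt;lock);
+                        break;
+                }
+                mask = rnp-&gt;grpmask;     /* This node's bit in its parent. */
+                raw_spin_unlock(&amp;rnp-&gt;lock);
+                rnp = rnp-&gt;parent;
+        }
+}
+</pre>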
+
+<h5>Blocked-Task Management</h5>
+
+<p><tt>PREEMPT_RCU</tt> allows tasks to be preempted in the
+midst of their RCU read-side critical sections, and these tasks
+must be tracked explicitly.
+The details of exactly why and how they are tracked will be covered
+in a separate article on RCU read-side processing.
+For now, it is enough to know that the <tt>rcu_node</tt>
+structure tracks them.
+
+<pre>
+  1   struct list_head blkd_tasks;
+  2   struct list_head *gp_tasks;
+  3   struct list_head *exp_tasks;
+  4   bool wait_blkd_tasks;
+</pre>
+
+<p>The <tt>-&gt;blkd_tasks</tt> field is a list header for
+the list of blocked and preempted tasks.
+As tasks undergo context switches within RCU read-side critical
+sections, their <tt>task_struct</tt> structures are enqueued
+(via the <tt>task_struct</tt>'s <tt>-&gt;rcu_node_entry</tt>
+field) onto the head of the <tt>-&gt;blkd_tasks</tt> list for the
+leaf <tt>rcu_node</tt> structure corresponding to the CPU
+on which the outgoing context switch executed.
+As these tasks later exit their RCU read-side critical sections,
+they remove themselves from the list.
+This list is therefore in reverse time order, so that if one of the tasks
+is blocking the current grace period, all subsequent tasks must
+also be blocking that same grace period.
+Therefore, a single pointer into this list suffices to track
+all tasks blocking a given grace period.
+That pointer is stored in <tt>-&gt;gp_tasks</tt> for normal
+grace periods and in <tt>-&gt;exp_tasks</tt> for expedited
+grace periods.
+These last two fields are <tt>NULL</tt> if either there is
+no grace period in flight or if there are no blocked tasks
+preventing that grace period from completing.
+If either of these two pointers is referencing a task that
+removes itself from the <tt>-&gt;blkd_tasks</tt> list,
+then that task must advance the pointer to the next task on
+the list, or set the pointer to <tt>NULL</tt> if there
+are no subsequent tasks on the list.
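+
+</p><p>The pointer-advance rule described in the preceding paragraph
+might look as follows; this is an illustrative sketch only, as the
+kernel's actual removal code in kernel/rcu/tree_plugin.h runs under the
+<tt>rcu_node</tt> structure's <tt>-&gt;lock</tt> and handles additional
+state:
+
+<pre>
+/*
+ * Illustrative sketch: task t is removing itself from rnp's
+ * -&gt;blkd_tasks list, so advance any grace-period pointers that
+ * currently reference it.
+ */
+static void advance_blkd_pointers_sketch(struct rcu_node *rnp,
+                                         struct task_struct *t)
+{
+        struct list_head *np = t-&gt;rcu_node_entry.next;
+
+        if (np == &amp;rnp-&gt;blkd_tasks)
+                np = NULL;      /* No tasks follow t on the list. */
+        if (rnp-&gt;gp_tasks == &amp;t-&gt;rcu_node_entry)
+                rnp-&gt;gp_tasks = np;
+        if (rnp-&gt;exp_tasks == &amp;t-&gt;rcu_node_entry)
+                rnp-&gt;exp_tasks = np;
+        list_del_init(&amp;t-&gt;rcu_node_entry);
+}
+</pre>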
+
+</p><p>For example, suppose that tasks&nbsp;T1, T2, and&nbsp;T3 are
+all hard-affinitied to the largest-numbered CPU in the system.
+Then if task&nbsp;T1 blocked in an RCU read-side
+critical section, then an expedited grace period started,
+then task&nbsp;T2 blocked in an RCU read-side critical section,
+then a normal grace period started, and finally task&nbsp;T3 blocked
+in an RCU read-side critical section, then the state of the
+last leaf <tt>rcu_node</tt> structure's blocked-task list
+would be as shown below:
+
+</p><p><img src="blkd_task.svg" alt="blkd_task.svg" width="60%">
+
+</p><p>Task&nbsp;T1 is blocking both grace periods, task&nbsp;T2 is
+blocking only the normal grace period, and task&nbsp;T3 is blocking
+neither grace period.
+Note that these tasks will not remove themselves from this list
+immediately upon resuming execution.
+They will instead remain on the list until they execute the outermost
+<tt>rcu_read_unlock()</tt> that ends their RCU read-side critical
+section.
+
+<p>
+The <tt>-&gt;wait_blkd_tasks</tt> field indicates whether or not
+the current grace period is waiting on a blocked task.
+
+<h5>Sizing the <tt>rcu_node</tt> Array</h5>
+
+<p>The <tt>rcu_node</tt> array is sized via a series of
+C-preprocessor expressions as follows:
+
+<pre>
+ 1 #ifdef CONFIG_RCU_FANOUT
+ 2 #define RCU_FANOUT CONFIG_RCU_FANOUT
+ 3 #else
+ 4 # ifdef CONFIG_64BIT
+ 5 # define RCU_FANOUT 64
+ 6 # else
+ 7 # define RCU_FANOUT 32
+ 8 # endif
+ 9 #endif
+10
+11 #ifdef CONFIG_RCU_FANOUT_LEAF
+12 #define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+13 #else
+14 # ifdef CONFIG_64BIT
+15 # define RCU_FANOUT_LEAF 64
+16 # else
+17 # define RCU_FANOUT_LEAF 32
+18 # endif
+19 #endif
+20
+21 #define RCU_FANOUT_1        (RCU_FANOUT_LEAF)
+22 #define RCU_FANOUT_2        (RCU_FANOUT_1 * RCU_FANOUT)
+23 #define RCU_FANOUT_3        (RCU_FANOUT_2 * RCU_FANOUT)
+24 #define RCU_FANOUT_4        (RCU_FANOUT_3 * RCU_FANOUT)
+25
+26 #if NR_CPUS &lt;= RCU_FANOUT_1
+27 #  define RCU_NUM_LVLS        1
+28 #  define NUM_RCU_LVL_0        1
+29 #  define NUM_RCU_NODES        NUM_RCU_LVL_0
+30 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
+31 #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
+32 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
+33 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
+34 #elif NR_CPUS &lt;= RCU_FANOUT_2
+35 #  define RCU_NUM_LVLS        2
+36 #  define NUM_RCU_LVL_0        1
+37 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+38 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+39 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+40 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
+41 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+42 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
+43 #elif NR_CPUS &lt;= RCU_FANOUT_3
+44 #  define RCU_NUM_LVLS        3
+45 #  define NUM_RCU_LVL_0        1
+46 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+47 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+48 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+49 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+50 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+51 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+52 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
+53 #elif NR_CPUS &lt;= RCU_FANOUT_4
+54 #  define RCU_NUM_LVLS        4
+55 #  define NUM_RCU_LVL_0        1
+56 #  define NUM_RCU_LVL_1        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+57 #  define NUM_RCU_LVL_2        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+58 #  define NUM_RCU_LVL_3        DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+59 #  define NUM_RCU_NODES        (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+60 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+61 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+62 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+63 #  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
+64 #else
+65 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+66 #endif
+</pre>
+
+<p>The maximum number of levels in the <tt>rcu_node</tt> structure
+is currently limited to four, as specified by lines&nbsp;21-24
+and the structure of the subsequent &ldquo;if&rdquo; statement.
+For 32-bit systems, this allows 16*32*32*32=524,288 CPUs, which
+should be sufficient for the next few years at least.
+For 64-bit systems, 16*64*64*64=4,194,304 CPUs is allowed, which
+should see us through the next decade or so.
+This four-level tree also allows kernels built with
+<tt>CONFIG_RCU_FANOUT=8</tt> to support up to 4096 CPUs,
+which might be useful in very large systems having eight CPUs per
+socket (but please note that no one has yet shown any measurable
+performance degradation due to misaligned socket and <tt>rcu_node</tt>
+boundaries).
+In addition, building kernels with a full four levels of <tt>rcu_node</tt>
+tree permits better testing of RCU's combining-tree code.
+
+</p><p>The <tt>RCU_FANOUT</tt> symbol controls how many children
+are permitted at each non-leaf level of the <tt>rcu_node</tt> tree.
+If the <tt>CONFIG_RCU_FANOUT</tt> Kconfig option is not specified,
+it is set based on the word size of the system, which is also
+the Kconfig default.
+
+</p><p>The <tt>RCU_FANOUT_LEAF</tt> symbol controls how many CPUs are
+handled by each leaf <tt>rcu_node</tt> structure.
+Experience has shown that allowing a given leaf <tt>rcu_node</tt>
+structure to handle 64 CPUs, as permitted by the number of bits in
+the <tt>-&gt;qsmask</tt> field on a 64-bit system, results in
+excessive contention for the leaf <tt>rcu_node</tt> structures'
+<tt>-&gt;lock</tt> fields.
+The number of CPUs per leaf <tt>rcu_node</tt> structure is therefore
+limited to 16 given the default value of <tt>CONFIG_RCU_FANOUT_LEAF</tt>.
+If <tt>CONFIG_RCU_FANOUT_LEAF</tt> is unspecified, the value
+selected is based on the word size of the system, just as for
+<tt>CONFIG_RCU_FANOUT</tt>.
+Lines&nbsp;11-19 perform this computation.
+
+</p><p>Lines&nbsp;21-24 compute the maximum number of CPUs supported by
+a single-level tree (which contains a single <tt>rcu_node</tt> structure),
+two-level, three-level, and four-level <tt>rcu_node</tt> tree,
+respectively, given the fanout specified by <tt>RCU_FANOUT</tt>
+and <tt>RCU_FANOUT_LEAF</tt>.
+These numbers of CPUs are retained in the
+<tt>RCU_FANOUT_1</tt>,
+<tt>RCU_FANOUT_2</tt>,
+<tt>RCU_FANOUT_3</tt>, and
+<tt>RCU_FANOUT_4</tt>
+C-preprocessor variables, respectively.
+
+</p><p>These variables are used to control the C-preprocessor <tt>#if</tt>
+statement spanning lines&nbsp;26-66 that computes the number of
+<tt>rcu_node</tt> structures required for each level of the tree,
+as well as the number of levels required.
+The number of levels is placed in the <tt>NUM_RCU_LVLS</tt>
+C-preprocessor variable by lines&nbsp;27, 35, 44, and&nbsp;54.
+The number of <tt>rcu_node</tt> structures for the topmost level
+of the tree is always exactly one, and this value is unconditionally
+placed into <tt>NUM_RCU_LVL_0</tt> by lines&nbsp;28, 36, 45, and&nbsp;55.
+The rest of the levels (if any) of the <tt>rcu_node</tt> tree
+are computed by dividing the maximum number of CPUs by the
+fanout supported by the number of levels from the current level down,
+rounding up.  This computation is performed by lines&nbsp;37,
+46-47, and&nbsp;56-58.
+Lines&nbsp;31-33, 40-42, 50-52, and&nbsp;62-63 create initializers
+for lockdep lock-class names.
+Finally, lines&nbsp;64-66 produce an error if the maximum number of
+CPUs is too large for the specified fanout.
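+
+</p><p>As a worked example, consider a 64-bit kernel built with the
+Kconfig defaults of <tt>CONFIG_RCU_FANOUT=64</tt> and
+<tt>CONFIG_RCU_FANOUT_LEAF=16</tt>, along with the distribution-friendly
+<tt>NR_CPUS=4096</tt> mentioned earlier.
+The expressions above then evaluate as follows, producing the
+promised three-level tree:
+
+<pre>
+RCU_FANOUT_1 =               16      /* CPUs per leaf. */
+RCU_FANOUT_2 =   16 * 64 =  1,024
+RCU_FANOUT_3 = 1,024 * 64 = 65,536
+
+NR_CPUS = 4,096 is greater than RCU_FANOUT_2, so three levels are used:
+NUM_RCU_LVL_0 = 1                                /* root */
+NUM_RCU_LVL_1 = DIV_ROUND_UP(4096, 1024) =   4   /* internal */
+NUM_RCU_LVL_2 = DIV_ROUND_UP(4096,   16) = 256   /* leaves */
+NUM_RCU_NODES = 1 + 4 + 256 = 261
+</pre>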
+
+<h3><a name="The rcu_data Structure">
+The <tt>rcu_data</tt> Structure</a></h3>
+
+<p>The <tt>rcu_data</tt> structure maintains the per-CPU state for the
+corresponding flavor of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+This structure is the
+focus of quiescent-state detection and RCU callback queuing.
+It also tracks its relationship to the corresponding leaf
+<tt>rcu_node</tt> structure to allow more-efficient
+propagation of quiescent states up the <tt>rcu_node</tt>
+combining tree.
+Like the <tt>rcu_node</tt> structure, it provides a local
+copy of the grace-period information to allow synchronized
+access to this information from the corresponding CPU at
+essentially no cost.
+Finally, this structure records past dyntick-idle state
+for the corresponding CPU and also tracks statistics.
+
+</p><p>The <tt>rcu_data</tt> structure's fields are discussed,
+singly and in groups, in the following sections.
+
+<h5>Connection to Other Data Structures</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int cpu;
+  2   struct rcu_state *rsp;
+  3   struct rcu_node *mynode;
+  4   struct rcu_dynticks *dynticks;
+  5   unsigned long grpmask;
+  6   bool beenonline;
+</pre>
+
+<p>The <tt>-&gt;cpu</tt> field contains the number of the
+corresponding CPU, the <tt>-&gt;rsp</tt> pointer references
+the corresponding <tt>rcu_state</tt> structure (and is most frequently
+used to locate the name of the corresponding flavor of RCU for tracing),
+and the <tt>-&gt;mynode</tt> field references the corresponding
+<tt>rcu_node</tt> structure.
+The <tt>-&gt;mynode</tt> field is used to propagate quiescent states
+up the combining tree.
+<p>The <tt>-&gt;dynticks</tt> pointer references the
+<tt>rcu_dynticks</tt> structure corresponding to this
+CPU.
+Recall that a single per-CPU instance of the <tt>rcu_dynticks</tt>
+structure is shared among all flavors of RCU.
+These first four fields are constant and therefore require no
+synchronization.
+
+</p><p>The <tt>-&gt;grpmask</tt> field indicates the bit in
+the <tt>-&gt;mynode-&gt;qsmask</tt> corresponding to this
+<tt>rcu_data</tt> structure, and is also used when propagating
+quiescent states.
+The <tt>-&gt;beenonline</tt> flag is set whenever the corresponding
+CPU comes online, which means that the debugfs tracing need not dump
+out any <tt>rcu_data</tt> structure for which this flag is not set.
+
+<h5>Quiescent-State and Grace-Period Tracking</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   unsigned long completed;
+  2   unsigned long gpnum;
+  3   bool cpu_no_qs;
+  4   bool core_needs_qs;
+  5   bool gpwrap;
+  6   unsigned long rcu_qs_ctr_snap;
+</pre>
+
+<p>The <tt>completed</tt> and <tt>gpnum</tt>
+fields are the counterparts of the fields of the same name
+in the <tt>rcu_state</tt> and <tt>rcu_node</tt> structures.
+They may each lag up to one behind their <tt>rcu_node</tt>
+counterparts, but in <tt>CONFIG_NO_HZ_IDLE</tt> and
+<tt>CONFIG_NO_HZ_FULL</tt> kernels can lag
+arbitrarily far behind for CPUs in dyntick-idle mode (but these counters
+will catch up upon exit from dyntick-idle mode).
+If a given <tt>rcu_data</tt> structure's <tt>-&gt;gpnum</tt> and
+<tt>-&gt;completed</tt> fields are equal, then this <tt>rcu_data</tt>
+structure believes that RCU is idle.
+Otherwise, as with the <tt>rcu_state</tt> and <tt>rcu_node</tt>
+structures,
+the <tt>-&gt;gpnum</tt> field will be one greater than the
+<tt>-&gt;completed</tt> field, with <tt>-&gt;gpnum</tt>
+indicating which grace period this <tt>rcu_data</tt> believes
+is still being waited for.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       All this replication of the grace period numbers can only cause
+       massive confusion.
+       Why not just keep a global pair of counters and be done with it???
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Because if there was only a single global pair of grace-period
+       numbers, there would need to be a single global lock to allow
+       safely accessing and updating them.
+       And if we are not going to have a single global lock, we need
+       to carefully manage the numbers on a per-node basis.
+       Recall from the answer to a previous Quick Quiz that the consequences
+       of applying a previously sampled quiescent state to the wrong
+       grace period are quite severe.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<p>The <tt>-&gt;cpu_no_qs</tt> flag indicates that the
+CPU has not yet passed through a quiescent state,
+while the <tt>-&gt;core_needs_qs</tt> flag indicates that the
+RCU core needs a quiescent state from the corresponding CPU.
+The <tt>-&gt;gpwrap</tt> field indicates that the corresponding
+CPU has remained idle for so long that the <tt>completed</tt>
+and <tt>gpnum</tt> counters are in danger of overflow, which
+will cause the CPU to disregard the values of its counters on
+its next exit from idle.
+Finally, the <tt>rcu_qs_ctr_snap</tt> field is used to detect
+cases where a given operation has resulted in a quiescent state
+for all flavors of RCU, for example, <tt>cond_resched_rcu_qs()</tt>.
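+
+</p><p>As a rough sketch of how the first two flags combine (the field
+layout in actual kernels differs somewhat, and additional conditions
+apply), the RCU core's per-CPU processing effectively asks:
+
+<pre>
+/*
+ * Sketch: should this CPU report a quiescent state up the rcu_node
+ * tree?  Yes, if the RCU core is waiting on this CPU and the CPU has
+ * in fact passed through a quiescent state since the grace period
+ * started.
+ */
+static bool cpu_should_report_qs_sketch(struct rcu_data *rdp)
+{
+        return rdp-&gt;core_needs_qs &amp;&amp; !rdp-&gt;cpu_no_qs;
+}
+</pre>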
+
+<h5>RCU Callback Handling</h5>
+
+<p>In the absence of CPU-hotplug events, RCU callbacks are invoked by
+the same CPU that registered them.
+This is strictly a cache-locality optimization: callbacks can and
+do get invoked on CPUs other than the one that registered them.
+After all, if the CPU that registered a given callback has gone
+offline before the callback can be invoked, there really is no other
+choice.
+
+</p><p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+ 1 struct rcu_head *nxtlist;
+ 2 struct rcu_head **nxttail[RCU_NEXT_SIZE];
+ 3 unsigned long nxtcompleted[RCU_NEXT_SIZE];
+ 4 long qlen_lazy;
+ 5 long qlen;
+ 6 long qlen_last_fqs_check;
+ 7 unsigned long n_force_qs_snap;
+ 8 unsigned long n_cbs_invoked;
+ 9 unsigned long n_cbs_orphaned;
+10 unsigned long n_cbs_adopted;
+11 long blimit;
+</pre>
+
+<p>The <tt>-&gt;nxtlist</tt> pointer and the
+<tt>-&gt;nxttail[]</tt> array form a four-segment list with
+older callbacks near the head and newer ones near the tail.
+Each segment contains callbacks with the corresponding relationship
+to the current grace period.
+The pointer out of the end of each of the four segments is referenced
+by the element of the <tt>-&gt;nxttail[]</tt> array indexed by
+<tt>RCU_DONE_TAIL</tt> (for callbacks handled by a prior grace period),
+<tt>RCU_WAIT_TAIL</tt> (for callbacks waiting on the current grace period),
+<tt>RCU_NEXT_READY_TAIL</tt> (for callbacks that will wait on the next
+grace period), and
+<tt>RCU_NEXT_TAIL</tt> (for callbacks that are not yet associated
+with a specific grace period)
+respectively, as shown in the following figure.
+
+</p><p><img src="nxtlist.svg" alt="nxtlist.svg" width="40%">
+
+</p><p>In this figure, the <tt>-&gt;nxtlist</tt> pointer references the
+first
+RCU callback in the list.
+The <tt>-&gt;nxttail[RCU_DONE_TAIL]</tt> array element references
+the <tt>-&gt;nxtlist</tt> pointer itself, indicating that none
+of the callbacks is ready to invoke.
+The <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt> array element references callback
+CB&nbsp;2's <tt>-&gt;next</tt> pointer, which indicates that
+CB&nbsp;1 and CB&nbsp;2 are both waiting on the current grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_READY_TAIL]</tt> array element
+references the same RCU callback that <tt>-&gt;nxttail[RCU_WAIT_TAIL]</tt>
+does, which indicates that there are no callbacks waiting on the next
+RCU grace period.
+The <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element references
+CB&nbsp;4's <tt>-&gt;next</tt> pointer, indicating that all the
+remaining RCU callbacks have not yet been assigned to an RCU grace
+period.
+Note that the <tt>-&gt;nxttail[RCU_NEXT_TAIL]</tt> array element
+always references the last RCU callback's <tt>-&gt;next</tt> pointer
+unless the callback list is empty, in which case it references
+the <tt>-&gt;nxtlist</tt> pointer.
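+
+</p><p>Enqueuing a new callback therefore amounts to appending it at the
+<tt>RCU_NEXT_TAIL</tt> position.
+The following is a simplified sketch of what <tt>call_rcu()</tt> and
+friends do internally; the real code also disables interrupts, performs
+lazy-callback accounting, and may force quiescent states when the list
+grows too long:
+
+<pre>
+/*
+ * Simplified sketch: append the new callback at the RCU_NEXT_TAIL
+ * position, where it will later be assigned a grace period.
+ */
+static void enqueue_callback_sketch(struct rcu_data *rdp,
+                                    struct rcu_head *head,
+                                    void (*func)(struct rcu_head *head))
+{
+        head-&gt;func = func;
+        head-&gt;next = NULL;
+        *rdp-&gt;nxttail[RCU_NEXT_TAIL] = head;
+        rdp-&gt;nxttail[RCU_NEXT_TAIL] = &amp;head-&gt;next;
+        rdp-&gt;qlen++;
+}
+</pre>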
+
+</p><p>CPUs advance their callbacks from the
+<tt>RCU_NEXT_TAIL</tt> to the <tt>RCU_NEXT_READY_TAIL</tt> to the
+<tt>RCU_WAIT_TAIL</tt> to the <tt>RCU_DONE_TAIL</tt> list segments
+as grace periods advance.
+The CPU advances the callbacks in its <tt>rcu_data</tt> structure
+whenever it notices that another RCU grace period has completed.
+The CPU detects the completion of an RCU grace period by noticing
+that the value of its <tt>rcu_data</tt> structure's
+<tt>-&gt;completed</tt> field differs from that of its leaf
+<tt>rcu_node</tt> structure.
+Recall that each <tt>rcu_node</tt> structure's
+<tt>-&gt;completed</tt> field is updated at the end of each
+grace period.
+
+</p><p>The <tt>-&gt;nxtcompleted[]</tt> array records grace-period
+numbers corresponding to the list segments.
+This allows CPUs that go idle for extended periods to determine
+which of their callbacks are ready to be invoked after reawakening.
+
+</p><p>The <tt>-&gt;qlen</tt> counter contains the number of
+callbacks in <tt>-&gt;nxtlist</tt>, and the
+<tt>-&gt;qlen_lazy</tt> counter contains the number of those callbacks that
+are known to only free memory, and whose invocation can therefore
+be safely deferred.
+The <tt>-&gt;qlen_last_fqs_check</tt> and
+<tt>-&gt;n_force_qs_snap</tt> coordinate the forcing of quiescent
+states from <tt>call_rcu()</tt> and friends when callback
+lists grow excessively long.
+
+</p><p>The <tt>-&gt;n_cbs_invoked</tt>,
+<tt>-&gt;n_cbs_orphaned</tt>, and <tt>-&gt;n_cbs_adopted</tt>
+fields count the number of callbacks invoked,
+sent to other CPUs when this CPU goes offline,
+and received from other CPUs when those other CPUs go offline.
+Finally, the <tt>-&gt;blimit</tt> counter is the maximum number of
+RCU callbacks that may be invoked at a given time.
+
+<h5>Dyntick-Idle Handling</h5>
+
+<p>This portion of the <tt>rcu_data</tt> structure is declared
+as follows:
+
+<pre>
+  1   int dynticks_snap;
+  2   unsigned long dynticks_fqs;
+</pre>
+
+The <tt>-&gt;dynticks_snap</tt> field is used to take a snapshot
+of the corresponding CPU's dyntick-idle state when forcing
+quiescent states, and is therefore accessed from other CPUs.
+Finally, the <tt>-&gt;dynticks_fqs</tt> field is used to
+count the number of times this CPU is determined to be in
+dyntick-idle state, and is used for tracing and debugging purposes.
+
+<h3><a name="The rcu_dynticks Structure">
+The <tt>rcu_dynticks</tt> Structure</a></h3>
+
+<p>The <tt>rcu_dynticks</tt> structure maintains the per-CPU dyntick-idle state
+for the corresponding CPU.
+Unlike the other structures, <tt>rcu_dynticks</tt> is not
+replicated over the different flavors of RCU.
+The fields in this structure may be accessed only from the corresponding
+CPU (and from tracing) unless otherwise stated.
+Its fields are as follows:
+
+<pre>
+  1   int dynticks_nesting;
+  2   int dynticks_nmi_nesting;
+  3   atomic_t dynticks;
+</pre>
+
+<p>The <tt>-&gt;dynticks_nesting</tt> field counts the
+nesting depth of normal interrupts.
+In addition, this counter is incremented when exiting dyntick-idle
+mode and decremented when entering it.
+This counter can therefore be thought of as counting the number
+of reasons why this CPU cannot be permitted to enter dyntick-idle
+mode, aside from non-maskable interrupts (NMIs).
+NMIs are counted by the <tt>-&gt;dynticks_nmi_nesting</tt>
+field, except that NMIs that interrupt non-dyntick-idle execution
+are not counted.
+
+</p><p>Finally, the <tt>-&gt;dynticks</tt> field counts the corresponding
+CPU's transitions to and from dyntick-idle mode, so that this counter
+has an even value when the CPU is in dyntick-idle mode and an odd
+value otherwise.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why not just count all NMIs?
+       Wouldn't that be simpler and less error prone?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       It seems simpler only until you think hard about how to go about
+       updating the <tt>rcu_dynticks</tt> structure's
+       <tt>-&gt;dynticks</tt> field.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
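+
+<p>The even/odd encoding allows other CPUs to sample a given CPU's
+dyntick-idle state without disturbing it.
+A hedged sketch of the idea follows; the kernel's actual helpers add
+memory ordering and compare snapshots stored in the <tt>rcu_data</tt>
+structure's <tt>-&gt;dynticks_snap</tt> field:
+
+<pre>
+/*
+ * Sketch: an even -&gt;dynticks value means that the corresponding CPU
+ * is in dyntick-idle mode, which is an extended quiescent state.
+ */
+static bool cpu_in_dyntick_idle_sketch(struct rcu_dynticks *rdtp)
+{
+        return (atomic_read(&amp;rdtp-&gt;dynticks) &amp; 0x1) == 0;
+}
+</pre>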
+
+<p>Additional fields are present for some special-purpose
+builds, and are discussed separately.
+
+<h3><a name="The rcu_head Structure">
+The <tt>rcu_head</tt> Structure</a></h3>
+
+<p>Each <tt>rcu_head</tt> structure represents an RCU callback.
+These structures are normally embedded within RCU-protected data
+structures whose algorithms use asynchronous grace periods.
+In contrast, when using algorithms that block waiting for RCU grace periods,
+RCU users need not provide <tt>rcu_head</tt> structures.
+
+</p><p>The <tt>rcu_head</tt> structure has fields as follows:
+
+<pre>
+  1   struct rcu_head *next;
+  2   void (*func)(struct rcu_head *head);
+</pre>
+
+<p>The <tt>-&gt;next</tt> field is used
+to link the <tt>rcu_head</tt> structures together in the
+lists within the <tt>rcu_data</tt> structures.
+The <tt>-&gt;func</tt> field is a pointer to the function
+to be called when the callback is ready to be invoked, and
+this function is passed a pointer to the <tt>rcu_head</tt>
+structure.
+However, <tt>kfree_rcu()</tt> uses the <tt>-&gt;func</tt>
+field to record the offset of the <tt>rcu_head</tt>
+structure within the enclosing RCU-protected data structure.
+
+</p><p>Both of these fields are used internally by RCU.
+From the viewpoint of RCU users, this structure is an
+opaque &ldquo;cookie&rdquo;.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Given that the callback function <tt>-&gt;func</tt>
+       is passed a pointer to the <tt>rcu_head</tt> structure,
+       how is that function supposed to find the beginning of the
+       enclosing RCU-protected data structure?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       In actual practice, there is a separate callback function per
+       type of RCU-protected data structure.
+       The callback function can therefore use the <tt>container_of()</tt>
+       macro in the Linux kernel (or other pointer-manipulation facilities
+       in other software environments) to find the beginning of the
+       enclosing structure.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
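+
+<p>For example, an RCU user embedding an <tt>rcu_head</tt> structure in
+its own data structure might register and implement a callback as in
+the following sketch; the <tt>struct foo</tt>, <tt>foo_reclaim()</tt>,
+and <tt>foo_remove()</tt> names are made up for illustration:
+
+<pre>
+struct foo {
+        int a;
+        struct rcu_head rh;     /* The embedded RCU "cookie". */
+};
+
+/* Callback: recover the enclosing structure, then free it. */
+static void foo_reclaim(struct rcu_head *head)
+{
+        struct foo *fp = container_of(head, struct foo, rh);
+
+        kfree(fp);
+}
+
+/* Remove fp from all reader-visible structures, then defer the free. */
+static void foo_remove(struct foo *fp)
+{
+        /* ... unlink fp under the appropriate update-side lock ... */
+        call_rcu(&amp;fp-&gt;rh, foo_reclaim);
+}
+</pre>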
+
+<h3><a name="RCU-Specific Fields in the task_struct Structure">
+RCU-Specific Fields in the <tt>task_struct</tt> Structure</a></h3>
+
+<p>The <tt>CONFIG_PREEMPT_RCU</tt> implementation uses some
+additional fields in the <tt>task_struct</tt> structure:
+
+<pre>
+ 1 #ifdef CONFIG_PREEMPT_RCU
+ 2   int rcu_read_lock_nesting;
+ 3   union rcu_special rcu_read_unlock_special;
+ 4   struct list_head rcu_node_entry;
+ 5   struct rcu_node *rcu_blocked_node;
+ 6 #endif /* #ifdef CONFIG_PREEMPT_RCU */
+ 7 #ifdef CONFIG_TASKS_RCU
+ 8   unsigned long rcu_tasks_nvcsw;
+ 9   bool rcu_tasks_holdout;
+10   struct list_head rcu_tasks_holdout_list;
+11   int rcu_tasks_idle_cpu;
+12 #endif /* #ifdef CONFIG_TASKS_RCU */
+</pre>
+
+<p>The <tt>-&gt;rcu_read_lock_nesting</tt> field records the
+nesting level for RCU read-side critical sections, and
+the <tt>-&gt;rcu_read_unlock_special</tt> field is a bitmask
+that records special conditions that require <tt>rcu_read_unlock()</tt>
+to do additional work.
+The <tt>-&gt;rcu_node_entry</tt> field is used to form lists of
+tasks that have blocked within preemptible-RCU read-side critical
+sections and the <tt>-&gt;rcu_blocked_node</tt> field references
+the <tt>rcu_node</tt> structure whose list this task is a member of,
+or <tt>NULL</tt> if it is not blocked within a preemptible-RCU
+read-side critical section.
+
+<p>The <tt>-&gt;rcu_tasks_nvcsw</tt> field tracks the number of
+voluntary context switches that this task had undergone at the
+beginning of the current tasks-RCU grace period,
+<tt>-&gt;rcu_tasks_holdout</tt> is set if the current tasks-RCU
+grace period is waiting on this task, <tt>-&gt;rcu_tasks_holdout_list</tt>
+is a list element enqueuing this task on the holdout list,
+and <tt>-&gt;rcu_tasks_idle_cpu</tt> tracks which CPU this
+idle task is running on, but only if the task is currently running,
+that is, if the CPU is currently idle.
+
+<h3><a name="Accessor Functions">
+Accessor Functions</a></h3>
+
+<p>The following listing shows the
+<tt>rcu_get_root()</tt>, <tt>rcu_for_each_node_breadth_first()</tt>,
+<tt>rcu_for_each_nonleaf_node_breadth_first()</tt>, and
+<tt>rcu_for_each_leaf_node()</tt> function and macros:
+
+<pre>
+  1 static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
+  2 {
+  3   return &amp;rsp-&gt;node[0];
+  4 }
+  5
+  6 #define rcu_for_each_node_breadth_first(rsp, rnp) \
+  7   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+  8        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+  9
+ 10 #define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
+ 11   for ((rnp) = &amp;(rsp)-&gt;node[0]; \
+ 12        (rnp) &lt; (rsp)-&gt;level[NUM_RCU_LVLS - 1]; (rnp)++)
+ 13
+ 14 #define rcu_for_each_leaf_node(rsp, rnp) \
+ 15   for ((rnp) = (rsp)-&gt;level[NUM_RCU_LVLS - 1]; \
+ 16        (rnp) &lt; &amp;(rsp)-&gt;node[NUM_RCU_NODES]; (rnp)++)
+</pre>
+
+<p>The <tt>rcu_get_root()</tt> function simply returns a pointer to the
+first element of the specified <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, which is the root <tt>rcu_node</tt>
+structure.
+
+</p><p>As noted earlier, the <tt>rcu_for_each_node_breadth_first()</tt>
+macro takes advantage of the layout of the <tt>rcu_node</tt>
+structures in the <tt>rcu_state</tt> structure's
+<tt>-&gt;node[]</tt> array, performing a breadth-first traversal by
+simply traversing the array in order.
+The <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> macro operates
+similarly, but traverses only the first part of the array, thus excluding
+the leaf <tt>rcu_node</tt> structures.
+Finally, the <tt>rcu_for_each_leaf_node()</tt> macro traverses only
+the last part of the array, thus traversing only the leaf
+<tt>rcu_node</tt> structures.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       What do <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> and
+       <tt>rcu_for_each_leaf_node()</tt> do if the <tt>rcu_node</tt> tree
+       contains only a single node?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       In the single-node case,
+       <tt>rcu_for_each_nonleaf_node_breadth_first()</tt> is a no-op
+       and <tt>rcu_for_each_leaf_node()</tt> traverses the single node.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
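+
+<p>As a usage sketch, the following loop visits each leaf
+<tt>rcu_node</tt> structure of the specified flavor and reports the
+range of CPUs that it covers; similar loops appear throughout
+kernel/rcu/tree.c:
+
+<pre>
+static void show_leaf_coverage_sketch(struct rcu_state *rsp)
+{
+        struct rcu_node *rnp;
+
+        rcu_for_each_leaf_node(rsp, rnp)
+                pr_info("%s: leaf rcu_node covers CPUs %d-%d\n",
+                        rsp-&gt;name, rnp-&gt;grplo, rnp-&gt;grphi);
+}
+</pre>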
+
+<h3><a name="Summary">
+Summary</a></h3>
+
+So each flavor of RCU is represented by an <tt>rcu_state</tt> structure,
+which contains a combining tree of <tt>rcu_node</tt> and
+<tt>rcu_data</tt> structures.
+Finally, in <tt>CONFIG_NO_HZ_IDLE</tt> kernels, each CPU's dyntick-idle
+state is tracked by an <tt>rcu_dynticks</tt> structure.
+
+If you made it this far, you are well prepared to read the code
+walkthroughs in the other articles in this series.
+
+<h3><a name="Acknowledgments">
+Acknowledgments</a></h3>
+
+I owe thanks to Cyrill Gorcunov, Mathieu Desnoyers, Dhaval Giani, Paul
+Turner, Abhishek Srivastava, Matt Kowalczyk, and Serge Hallyn
+for helping me get this document into a more human-readable state.
+
+<h3><a name="Legal Statement">
+Legal Statement</a></h3>
+
+<p>This work represents the view of the author and does not necessarily
+represent the view of IBM.
+
+</p><p>Linux is a registered trademark of Linus Torvalds.
+
+</p><p>Other company, product, and service names may be trademarks or
+service marks of others.
+
+</body></html>
diff --git a/Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg b/Documentation/RCU/Design/Data-Structures/HugeTreeClassicRCU.svg
new file mode 100644 (file)
index 0000000..2bf12b4
--- /dev/null
@@ -0,0 +1,939 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:37:22 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="15.1in"
+   height="11.2in"
+   viewBox="-66 -66 18087 13407"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="HugeTreeClassicRCU.fig">
+  <metadata
+     id="metadata224">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs222">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3982"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1134"
+     inkscape:window-height="789"
+     id="namedview220"
+     showgrid="false"
+     inkscape:zoom="0.60515873"
+     inkscape:cx="679.5"
+     inkscape:cy="504"
+     inkscape:window-x="786"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="17100"
+       height="8325"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="11025"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="4275"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="5400"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="9900"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="14400"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="900"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line: box -->
+    <rect
+       x="7650"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect20" />
+    <!-- Line -->
+    <polyline
+       points="3150,9225 3150,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 3150 9225 - 3150 7560-->
+    <!-- Circle -->
+    <circle
+       cx="8550"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="9000"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="9450"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Line -->
+    <polyline
+       points="6750,6300 8250,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 6750 6300 - 8391 4890-->
+    <!-- Line -->
+    <polyline
+       points="11250,6300 9747,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 11250 6300 - 9606 4890-->
+    <!-- Circle -->
+    <circle
+       cx="13950"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle40" />
+    <!-- Circle -->
+    <circle
+       cx="13500"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle42" />
+    <!-- Circle -->
+    <circle
+       cx="13050"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle44" />
+    <!-- Circle -->
+    <circle
+       cx="9450"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle46" />
+    <!-- Circle -->
+    <circle
+       cx="9000"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle48" />
+    <!-- Circle -->
+    <circle
+       cx="8550"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle50" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle52" />
+    <!-- Circle -->
+    <circle
+       cx="4500"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle54" />
+    <!-- Circle -->
+    <circle
+       cx="4050"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle56" />
+    <!-- Circle -->
+    <circle
+       cx="1800"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle58" />
+    <!-- Circle -->
+    <circle
+       cx="2250"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle60" />
+    <!-- Circle -->
+    <circle
+       cx="2700"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle62" />
+    <!-- Circle -->
+    <circle
+       cx="15300"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle64" />
+    <!-- Circle -->
+    <circle
+       cx="15750"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle66" />
+    <!-- Circle -->
+    <circle
+       cx="16200"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="10800"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="11250"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="11700"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="6300"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="6750"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="7200"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle80" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect82" />
+    <!-- Line: box -->
+    <rect
+       x="1800"
+       y="9225"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect84" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="6300"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="8955"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="10755"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="13455"
+       y="11475"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="15255"
+       y="9270"
+       width="2700"
+       height="1800"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect96" />
+    <!-- Line -->
+    <polyline
+       points="11700,3600 10197,2310 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline98" />
+    <!-- Arrowhead on XXXpoint 11700 3600 - 10056 2190-->
+    <!-- Line -->
+    <polyline
+       points="6300,3600 7800,2310 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline102" />
+    <!-- Arrowhead on XXXpoint 6300 3600 - 7941 2190-->
+    <!-- Line -->
+    <polyline
+       points="3150,6300 4650,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline106" />
+    <!-- Arrowhead on XXXpoint 3150 6300 - 4791 4890-->
+    <!-- Line -->
+    <polyline
+       points="14850,6300 13347,5010 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline110" />
+    <!-- Arrowhead on XXXpoint 14850 6300 - 13206 4890-->
+    <!-- Line -->
+    <polyline
+       points="1350,11475 1350,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 1350 11475 - 1350 7560-->
+    <!-- Line -->
+    <polyline
+       points="16650,9225 16650,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline118" />
+    <!-- Arrowhead on XXXpoint 16650 9225 - 16650 7560-->
+    <!-- Line -->
+    <polyline
+       points="14850,11475 14850,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline122" />
+    <!-- Arrowhead on XXXpoint 14850 11475 - 14850 7560-->
+    <!-- Line -->
+    <polyline
+       points="12150,9225 12150,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline126" />
+    <!-- Arrowhead on XXXpoint 12150 9225 - 12150 7560-->
+    <!-- Line -->
+    <polyline
+       points="10350,11475 10350,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline130" />
+    <!-- Arrowhead on XXXpoint 10350 11475 - 10350 7560-->
+    <!-- Line -->
+    <polyline
+       points="7650,9225 7650,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline134" />
+    <!-- Arrowhead on XXXpoint 7650 9225 - 7650 7560-->
+    <!-- Line -->
+    <polyline
+       points="5850,11475 5850,7746 "
+       style="stroke:#00d1d1;stroke-width:44.99790066;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline138" />
+    <!-- Arrowhead on XXXpoint 5850 11475 - 5850 7560-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12375"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text142">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12375"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5625"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text146">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5625"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text148">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6750"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text150">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6750"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text152">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11250"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text154">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11250"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15750"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text158">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15750"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text160">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text164">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text166">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text170">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="10800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text172">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="9675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="10125"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text176">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text178">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text180">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5850"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text182">CPU 21823</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text184">CPU 21839</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text186">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7650"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text188">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text192">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10305"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text194">CPU 43679</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text196">CPU 43695</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text198">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text200">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="11925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text202">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="12375"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text204">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14805"
+       y="13050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text206">CPU 65519</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="10845"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text208">CPU 65535</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="10170"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text210">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="16605"
+       y="9720"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text212">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="675"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text214">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9000"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text216">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9000"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text218">rcu_node</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeLevel.svg b/Documentation/RCU/Design/Data-Structures/TreeLevel.svg
new file mode 100644 (file)
index 0000000..7a7eb3b
--- /dev/null
@@ -0,0 +1,828 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:41:29 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="17.7in"
+   height="10.4in"
+   viewBox="-66 -66 21237 12507"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeLevel.fig">
+  <metadata
+     id="metadata216">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs214">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3974"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1023"
+     inkscape:window-height="1148"
+     id="namedview212"
+     showgrid="false"
+     inkscape:zoom="0.55869424"
+     inkscape:cx="796.50006"
+     inkscape:cy="467.99997"
+     inkscape:window-x="897"
+     inkscape:window-y="24"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="20655"
+       height="8325"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="14130"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="7380"
+       y="3600"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="8505"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="13005"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="17505"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="4005"
+       y="6300"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line: box -->
+    <rect
+       x="10755"
+       y="900"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect20" />
+    <!-- Line -->
+    <polyline
+       points="6255,9225 6255,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline22" />
+    <!-- Arrowhead on XXXpoint 6255 9225 - 6255 7560-->
+    <!-- Circle -->
+    <circle
+       cx="11655"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle26" />
+    <!-- Circle -->
+    <circle
+       cx="12105"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle28" />
+    <!-- Circle -->
+    <circle
+       cx="12555"
+       cy="4275"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle30" />
+    <!-- Line -->
+    <polyline
+       points="9855,6300 11355,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 9855 6300 - 11496 4890-->
+    <!-- Line -->
+    <polyline
+       points="14355,6300 12852,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 14355 6300 - 12711 4890-->
+    <!-- Circle -->
+    <circle
+       cx="17055"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle40" />
+    <!-- Circle -->
+    <circle
+       cx="16605"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle42" />
+    <!-- Circle -->
+    <circle
+       cx="16155"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle44" />
+    <!-- Circle -->
+    <circle
+       cx="12555"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle46" />
+    <!-- Circle -->
+    <circle
+       cx="12105"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle48" />
+    <!-- Circle -->
+    <circle
+       cx="11655"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle50" />
+    <!-- Circle -->
+    <circle
+       cx="8055"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle52" />
+    <!-- Circle -->
+    <circle
+       cx="7605"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle54" />
+    <!-- Circle -->
+    <circle
+       cx="7155"
+       cy="6975"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle56" />
+    <!-- Circle -->
+    <circle
+       cx="4905"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle58" />
+    <!-- Circle -->
+    <circle
+       cx="5355"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle60" />
+    <!-- Circle -->
+    <circle
+       cx="5805"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle62" />
+    <!-- Circle -->
+    <circle
+       cx="18405"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle64" />
+    <!-- Circle -->
+    <circle
+       cx="18855"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle66" />
+    <!-- Circle -->
+    <circle
+       cx="19305"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="13905"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="14355"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="14805"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="9405"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="9855"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="10305"
+       cy="8775"
+       r="114"
+       style="fill:#000000;stroke:#000000;stroke-width:21;"
+       id="circle80" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="1125"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect82" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="2250"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect84" />
+    <!-- Line: box -->
+    <rect
+       x="225"
+       y="3375"
+       width="3150"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:21; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect86" />
+    <!-- Line -->
+    <polyline
+       points="14805,3600 13302,2310 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline88" />
+    <!-- Arrowhead on XXXpoint 14805 3600 - 13161 2190-->
+    <!-- Line -->
+    <polyline
+       points="9405,3600 10905,2310 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline92" />
+    <!-- Arrowhead on XXXpoint 9405 3600 - 11046 2190-->
+    <!-- Line -->
+    <polyline
+       points="6255,6300 7755,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline96" />
+    <!-- Arrowhead on XXXpoint 6255 6300 - 7896 4890-->
+    <!-- Line -->
+    <polyline
+       points="17955,6300 16452,5010 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline100" />
+    <!-- Arrowhead on XXXpoint 17955 6300 - 16311 4890-->
+    <!-- Line -->
+    <polyline
+       points="4455,11025 4455,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline104" />
+    <!-- Arrowhead on XXXpoint 4455 11025 - 4455 7560-->
+    <!-- Line -->
+    <polyline
+       points="19755,9225 19755,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 19755 9225 - 19755 7560-->
+    <!-- Line -->
+    <polyline
+       points="17955,11025 17955,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline112" />
+    <!-- Arrowhead on XXXpoint 17955 11025 - 17955 7560-->
+    <!-- Line -->
+    <polyline
+       points="15255,9225 15255,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline116" />
+    <!-- Arrowhead on XXXpoint 15255 9225 - 15255 7560-->
+    <!-- Line -->
+    <polyline
+       points="13455,11025 13455,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline120" />
+    <!-- Arrowhead on XXXpoint 13455 11025 - 13455 7560-->
+    <!-- Line -->
+    <polyline
+       points="10755,9225 10755,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline124" />
+    <!-- Arrowhead on XXXpoint 10755 9225 - 10755 7560-->
+    <!-- Line -->
+    <polyline
+       points="8955,11025 8955,7746 "
+       style="stroke:#00d1d1;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline128" />
+    <!-- Arrowhead on XXXpoint 8955 11025 - 8955 7560-->
+    <!-- Line: box -->
+    <rect
+       x="12105"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect132" />
+    <!-- Line: box -->
+    <rect
+       x="13905"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect134" />
+    <!-- Line: box -->
+    <rect
+       x="16605"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect136" />
+    <!-- Line: box -->
+    <rect
+       x="18405"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect138" />
+    <!-- Line: box -->
+    <rect
+       x="9405"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect140" />
+    <!-- Line: box -->
+    <rect
+       x="7605"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect142" />
+    <!-- Line: box -->
+    <rect
+       x="4905"
+       y="9225"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect144" />
+    <!-- Line: box -->
+    <rect
+       x="3105"
+       y="11025"
+       width="2700"
+       height="1350"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect146" />
+    <!-- Line -->
+    <polyline
+       points="3375,1575 10701,1575 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline148" />
+    <!-- Arrowhead on XXXpoint 3375 1575 - 10890 1575-->
+    <!-- Line -->
+    <polyline
+       points="3375,3825 4050,3825 4050,5400 2700,5400 2700,6975 3951,6975 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline152" />
+    <!-- Arrowhead on XXXpoint 2700 6975 - 4140 6975-->
+    <!-- Line -->
+    <polyline
+       points="3375,2700 5175,2700 5175,4275 7326,4275 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline156" />
+    <!-- Arrowhead on XXXpoint 5175 4275 - 7515 4275-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15480"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text160">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15480"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text164">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8730"
+       y="4500"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text166">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9855"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text168">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="9855"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text170">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14355"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text172">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="14355"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="18855"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text176">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="18855"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text178">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5355"
+       y="6750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text180">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5355"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text182">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text184">-&gt;level[0]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="2925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text186">-&gt;level[1]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text188">-&gt;level[2]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text190">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="12105"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="middle"
+       id="text192">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6255"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text194">CPU 15</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4455"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text196">CPU 0</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="19755"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text198">CPU 65535</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="17955"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text200">CPU 65519</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="15255"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text202">CPU 43695</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="13455"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text204">CPU 43679</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="10755"
+       y="10125"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text206">CPU 21839</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="8955"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text208">CPU 21823</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="450"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="288"
+       text-anchor="start"
+       id="text210">struct rcu_state</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeMapping.svg b/Documentation/RCU/Design/Data-Structures/TreeMapping.svg
new file mode 100644 (file)
index 0000000..729cfa9
--- /dev/null
@@ -0,0 +1,305 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:43:22 2015 -->
+
+<!-- Magnification: 1.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="3.1in"
+   height="0.9in"
+   viewBox="-12 -12 3699 1074"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeMapping.fig">
+  <metadata
+     id="metadata66">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs64">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3836"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow2Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Mend"
+       style="overflow:visible;">
+      <path
+         id="path3842"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(0.6) rotate(180) translate(0,0)" />
+    </marker>
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3824"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="991"
+     inkscape:window-height="606"
+     id="namedview62"
+     showgrid="false"
+     inkscape:zoom="3.0752688"
+     inkscape:cx="139.5"
+     inkscape:cy="40.5"
+     inkscape:window-x="891"
+     inkscape:window-y="177"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="3675"
+       height="1050"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="600"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="1650"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="2175"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="3225"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect18" />
+    <!-- Line -->
+    <polyline
+       points="675,375 675,150 300,150 300,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 300 150 - 300 390-->
+    <!-- Line -->
+    <polyline
+       points="1200,675 1200,900 300,900 300,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 300 900 - 300 660-->
+    <!-- Line -->
+    <polyline
+       points="1725,375 1725,150 900,150 900,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline28" />
+    <!-- Arrowhead on XXXpoint 900 150 - 900 390-->
+    <!-- Line -->
+    <polyline
+       points="2250,375 2250,75 825,75 825,358 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 825 75 - 825 390-->
+    <!-- Line -->
+    <polyline
+       points="2775,675 2775,900 1425,900 1425,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 1425 900 - 1425 660-->
+    <!-- Line -->
+    <polyline
+       points="3300,675 3300,975 1350,975 1350,691 "
+       style="stroke:#000000;stroke-width:7.00088889;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 1350 975 - 1350 660-->
+    <!-- Line: box -->
+    <rect
+       x="2700"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect44" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="300"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text46">0:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text48">4:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1875"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text50">0:1  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text52">2:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2925"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text54">4:5  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3450"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text56">6:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="825"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text58">0:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3600"
+       y="150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="end"
+       id="text60">struct rcu_state</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg b/Documentation/RCU/Design/Data-Structures/TreeMappingLevel.svg
new file mode 100644 (file)
index 0000000..5b416a4
--- /dev/null
@@ -0,0 +1,380 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:45:19 2015 -->
+
+<!-- Magnification: 1.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="3.1in"
+   height="1.8in"
+   viewBox="-12 -12 3699 2124"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="TreeMappingLevel.svg">
+  <metadata
+     id="metadata98">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title />
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs96">
+    <marker
+       inkscape:stockid="Arrow2Lend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow2Lend"
+       style="overflow:visible;">
+      <path
+         id="path3868"
+         style="fill-rule:evenodd;stroke-width:0.62500000;stroke-linejoin:round;"
+         d="M 8.7185878,4.0337352 L -2.2072895,0.016013256 L 8.7185884,-4.0017078 C 6.9730900,-1.6296469 6.9831476,1.6157441 8.7185878,4.0337352 z "
+         transform="scale(1.1) rotate(180) translate(1,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1598"
+     inkscape:window-height="1211"
+     id="namedview94"
+     showgrid="false"
+     inkscape:zoom="5.2508961"
+     inkscape:cx="139.5"
+     inkscape:cy="81"
+     inkscape:window-x="840"
+     inkscape:window-y="122"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="3675"
+       height="2100"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1350"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1575"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="1800"
+       width="750"
+       height="225"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect12" />
+    <!-- Arc -->
+    <path
+       style="stroke:#000000;stroke-width:7;stroke-linecap:butt;"
+       d="M 1800,900 A 118 118  0  0  0  1800  1125 "
+       id="path14" />
+    <!-- Arc -->
+    <path
+       style="stroke:#000000;stroke-width:7;stroke-linecap:butt;"
+       d="M 750,900 A 75 75  0  0  0  750  1050 "
+       id="path16" />
+    <!-- Line -->
+    <polyline
+       points="750,900 750,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline18" />
+    <!-- Arrowhead on XXXpoint 750 900 - 750 660-->
+    <!-- Line: box -->
+    <rect
+       x="75"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect22" />
+    <!-- Line: box -->
+    <rect
+       x="600"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect24" />
+    <!-- Line: box -->
+    <rect
+       x="1650"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect26" />
+    <!-- Line: box -->
+    <rect
+       x="2175"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="3225"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="675,375 675,150 300,150 300,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 300 150 - 300 390-->
+    <!-- Line -->
+    <polyline
+       points="1725,375 1725,150 900,150 900,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 900 150 - 900 390-->
+    <!-- Line -->
+    <polyline
+       points="2250,375 2250,75 825,75 825,358 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 825 75 - 825 390-->
+    <!-- Line -->
+    <polyline
+       points="2775,675 2775,975 1425,975 1425,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1425 975 - 1425 660-->
+    <!-- Line: box -->
+    <rect
+       x="2700"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect48" />
+    <!-- Line: box -->
+    <rect
+       x="1125"
+       y="375"
+       width="375"
+       height="300"
+       rx="0"
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect50" />
+    <!-- Line -->
+    <polyline
+       points="3300,675 3300,1050 1350,1050 1350,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 1350 1050 - 1350 660-->
+    <!-- Line -->
+    <polyline
+       points="825,1425 975,1425 975,1200 225,1200 225,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 225 1200 - 225 660-->
+    <!-- Line -->
+    <polyline
+       points="1200,675 1200,975 300,975 300,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 300 975 - 300 660-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1500"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text64">-&gt;level[0]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1725"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text66">-&gt;level[1]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="150"
+       y="1950"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="108"
+       text-anchor="start"
+       id="text68">-&gt;level[2]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="300"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text70">0:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1350"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text72">4:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1875"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text74">0:1  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text76">2:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2925"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text78">4:5  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3450"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text80">6:7  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="825"
+       y="525"
+       fill="#000000"
+       font-family="Times"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="middle"
+       id="text82">0:3  </text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3600"
+       y="150"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="normal"
+       font-size="96"
+       text-anchor="end"
+       id="text84">struct rcu_state</text>
+    <!-- Line -->
+    <polyline
+       points="825,1875 1800,1875 1800,1125 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:none"
+       id="polyline86" />
+    <!-- Line -->
+    <polyline
+       points="1800,900 1800,691 "
+       style="stroke:#000000;stroke-width:7.00025806;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow2Lend)"
+       id="polyline88" />
+    <!-- Arrowhead on XXXpoint 1800 900 - 1800 660-->
+    <!-- Line -->
+    <polyline
+       points="825,1650 1200,1650 1200,1125 750,1125 750,1050 "
+       style="stroke:#000000;stroke-width:7; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline92" />
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/blkd_task.svg b/Documentation/RCU/Design/Data-Structures/blkd_task.svg
new file mode 100644 (file)
index 0000000..00e810b
--- /dev/null
@@ -0,0 +1,843 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:35:03 2015 -->
+
+<!-- Magnification: 2.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="10.1in"
+   height="8.6in"
+   viewBox="-44 -44 12088 10288"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="blkd_task.fig">
+  <metadata
+     id="metadata212">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs210">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3970"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="1087"
+     inkscape:window-height="1144"
+     id="namedview208"
+     showgrid="false"
+     inkscape:zoom="1.0495049"
+     inkscape:cx="454.50003"
+     inkscape:cy="387.00003"
+     inkscape:window-x="833"
+     inkscape:window-y="28"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="450"
+       y="0"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="4950"
+       y="4950"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="600"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect10" />
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5688,5912 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline12" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5710 5790-->
+    <polyline
+       points="5714 6068 5704 5822 5598 6044 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline14" />
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4486,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline16" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4512 7140-->
+    <polyline
+       points="4514 7418 4506 7172 4396 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline18" />
+    <!-- Line -->
+    <polyline
+       points="1040,9300 1476,7262 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 1040 9300 - 1502 7140-->
+    <polyline
+       points="1504 7418 1496 7172 1386 7394 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline22" />
+    <!-- Line -->
+    <polyline
+       points="2240,8100 2676,6062 "
+       style="stroke:#00ff00;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="polyline24" />
+    <!-- Arrowhead on XXXpoint 2240 8100 - 2702 5940-->
+    <polyline
+       points="2704 6218 2696 5972 2586 6194 "
+       style="stroke:#00ff00;stroke-width:14;stroke-miterlimit:8; "
+       id="polyline26" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="450"
+       width="6300"
+       height="7350"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffffff; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="1050"
+       width="5700"
+       height="3750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffff00; "
+       id="rect30" />
+    <!-- Line -->
+    <polyline
+       points="1350,3450 2350,2590 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline32" />
+    <!-- Arrowhead on XXXpoint 1350 3450 - 2444 2510-->
+    <!-- Line -->
+    <polyline
+       points="4950,3450 3948,2590 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline36" />
+    <!-- Arrowhead on XXXpoint 4950 3450 - 3854 2510-->
+    <!-- Line -->
+    <polyline
+       points="4050,6600 4050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline40" />
+    <!-- Arrowhead on XXXpoint 4050 6600 - 4050 4290-->
+    <!-- Line -->
+    <polyline
+       points="1050,6600 1050,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline44" />
+    <!-- Arrowhead on XXXpoint 1050 6600 - 1050 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,5400 2250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline48" />
+    <!-- Arrowhead on XXXpoint 2250 5400 - 2250 4290-->
+    <!-- Line -->
+    <polyline
+       points="2250,8100 2250,6364 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline52" />
+    <!-- Arrowhead on XXXpoint 2250 8100 - 2250 6240-->
+    <!-- Line -->
+    <polyline
+       points="1050,9300 1050,7564 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline56" />
+    <!-- Arrowhead on XXXpoint 1050 9300 - 1050 7440-->
+    <!-- Line -->
+    <polyline
+       points="4050,9300 4050,7564 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 4050 9300 - 4050 7440-->
+    <!-- Line -->
+    <polyline
+       points="5250,8100 5250,6364 "
+       style="stroke:#00ff00;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline64" />
+    <!-- Arrowhead on XXXpoint 5250 8100 - 5250 6240-->
+    <!-- Circle -->
+    <circle
+       cx="2850"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle68" />
+    <!-- Circle -->
+    <circle
+       cx="3150"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle70" />
+    <!-- Circle -->
+    <circle
+       cx="3450"
+       cy="3900"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle72" />
+    <!-- Circle -->
+    <circle
+       cx="1350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle74" />
+    <!-- Circle -->
+    <circle
+       cx="1650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle76" />
+    <!-- Circle -->
+    <circle
+       cx="1950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle78" />
+    <!-- Circle -->
+    <circle
+       cx="4350"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle80" />
+    <!-- Circle -->
+    <circle
+       cx="4650"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle82" />
+    <!-- Circle -->
+    <circle
+       cx="4950"
+       cy="5100"
+       r="76"
+       style="fill:#000000;stroke:#000000;stroke-width:14;"
+       id="circle84" />
+    <!-- Line: box -->
+    <rect
+       x="750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect86" />
+    <!-- Line: box -->
+    <rect
+       x="300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect88" />
+    <!-- Line: box -->
+    <rect
+       x="4500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect90" />
+    <!-- Line: box -->
+    <rect
+       x="3300"
+       y="6600"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect92" />
+    <!-- Line: box -->
+    <rect
+       x="2250"
+       y="1650"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect94" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect96" />
+    <!-- Line: box -->
+    <rect
+       x="1350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect98" />
+    <!-- Line: box -->
+    <rect
+       x="3000"
+       y="9300"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect100" />
+    <!-- Line: box -->
+    <rect
+       x="4350"
+       y="8100"
+       width="2100"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#00ff00; "
+       id="rect102" />
+    <!-- Line: box -->
+    <rect
+       x="1500"
+       y="5400"
+       width="1500"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect104" />
+    <!-- Line -->
+    <polygon
+       points="5550,3450 7350,2850 7350,5100 5550,4350 5550,3450 "
+       style="stroke:#000000;stroke-width:14; stroke-linejoin:miter; stroke-linecap:butt; stroke-dasharray:120 120;fill:#ffbfbf; "
+       id="polygon106" />
+    <!-- Line -->
+    <polyline
+       points="9300,3150 10734,3150 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline108" />
+    <!-- Arrowhead on XXXpoint 9300 3150 - 10860 3150-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="2850"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect112" />
+    <!-- Line -->
+    <polyline
+       points="11400,3600 11400,4284 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline114" />
+    <!-- Arrowhead on XXXpoint 11400 3600 - 11400 4410-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="4350"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect118" />
+    <!-- Line -->
+    <polyline
+       points="11400,5100 11400,5784 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline120" />
+    <!-- Arrowhead on XXXpoint 11400 5100 - 11400 5910-->
+    <!-- Line: box -->
+    <rect
+       x="10800"
+       y="5850"
+       width="1200"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect124" />
+    <!-- Line -->
+    <polyline
+       points="9300,3900 9900,3900 9900,4650 10734,4650 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline126" />
+    <!-- Arrowhead on XXXpoint 9900 4650 - 10860 4650-->
+    <!-- Line -->
+    <polyline
+       points="9300,4650 9600,4650 9600,6150 10734,6150 "
+       style="stroke:#000000;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline130" />
+    <!-- Arrowhead on XXXpoint 9600 6150 - 10860 6150-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6450"
+       y="300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text134">rcu_bh</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="1950"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text136">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="3150"
+       y="2250"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text138">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text140">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text142">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text144">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text146">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text148">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text150">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="5700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text152">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5250"
+       y="6000"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text154">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="6900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text156">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="7200"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text158">rcu_data</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="450"
+       y="1350"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text160">struct rcu_state</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text162">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="1050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text164">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9600"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text166">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4050"
+       y="9900"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text168">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text170">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="2400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text172">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8400"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text174">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="5400"
+       y="8700"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text176">rcu_dynticks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="6000"
+       y="750"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="192"
+       text-anchor="end"
+       id="text178">rcu_sched</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="3300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text180">T3</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="4800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text182">T2</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11400"
+       y="6300"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="216"
+       text-anchor="middle"
+       id="text184">T1</text>
+    <!-- Line -->
+    <polyline
+       points="5250,5400 5250,4414 "
+       style="stroke:#00d1d1;stroke-width:30.00057884;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline186" />
+    <!-- Arrowhead on XXXpoint 5250 5400 - 5250 4290-->
+    <!-- Line: box -->
+    <rect
+       x="3750"
+       y="3450"
+       width="1800"
+       height="900"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect190" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="2850"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect192" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="3600"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect194" />
+    <!-- Line: box -->
+    <rect
+       x="7350"
+       y="4350"
+       width="1950"
+       height="750"
+       rx="0"
+       style="stroke:#000000;stroke-width:30; stroke-linejoin:miter; stroke-linecap:butt; fill:#ffbfbf; "
+       id="rect196" />
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text198">rcu_node</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="4650"
+       y="3750"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="middle"
+       id="text200">struct</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="3300"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text202">blkd_tasks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text204">gp_tasks</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="7500"
+       y="4800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="192"
+       text-anchor="start"
+       id="text206">exp_tasks</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Data-Structures/nxtlist.svg b/Documentation/RCU/Design/Data-Structures/nxtlist.svg
new file mode 100644 (file)
index 0000000..abc4cc7
--- /dev/null
@@ -0,0 +1,396 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Creator: fig2dev Version 3.2 Patchlevel 5e -->
+
+<!-- CreationDate: Wed Dec  9 17:39:46 2015 -->
+
+<!-- Magnification: 3.000 -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="10.4in"
+   height="10.4in"
+   viewBox="-66 -66 12507 12507"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.4 r9939"
+   sodipodi:docname="nxtlist.fig">
+  <metadata
+     id="metadata94">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <defs
+     id="defs92">
+    <marker
+       inkscape:stockid="Arrow1Mend"
+       orient="auto"
+       refY="0.0"
+       refX="0.0"
+       id="Arrow1Mend"
+       style="overflow:visible;">
+      <path
+         id="path3852"
+         d="M 0.0,0.0 L 5.0,-5.0 L -12.5,0.0 L 5.0,5.0 L 0.0,0.0 z "
+         style="fill-rule:evenodd;stroke:#000000;stroke-width:1.0pt;"
+         transform="scale(0.4) rotate(180) translate(10,0)" />
+    </marker>
+  </defs>
+  <sodipodi:namedview
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1"
+     objecttolerance="10"
+     gridtolerance="10"
+     guidetolerance="10"
+     inkscape:pageopacity="0"
+     inkscape:pageshadow="2"
+     inkscape:window-width="925"
+     inkscape:window-height="928"
+     id="namedview90"
+     showgrid="false"
+     inkscape:zoom="0.80021373"
+     inkscape:cx="467.99997"
+     inkscape:cy="467.99997"
+     inkscape:window-x="948"
+     inkscape:window-y="73"
+     inkscape:window-maximized="0"
+     inkscape:current-layer="g4" />
+  <g
+     style="stroke-width:.025in; fill:none"
+     id="g4">
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="0"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect6" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="1125"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect8" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="2250"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect10" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="3375"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect12" />
+    <!-- Line: box -->
+    <rect
+       x="0"
+       y="4500"
+       width="7875"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; fill:#87cfff; "
+       id="rect14" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="0"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect16" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="1125"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect18" />
+    <!-- Line -->
+    <polyline
+       points="11475,2250 11475,3276 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline20" />
+    <!-- Arrowhead on XXXpoint 11475 2250 - 11475 3465-->
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="6750"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect24" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="7875"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect26" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="10125"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect28" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="11250"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect30" />
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="3375"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect32" />
+    <!-- Line -->
+    <polyline
+       points="11475,5625 11475,6651 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline34" />
+    <!-- Arrowhead on XXXpoint 11475 5625 - 11475 6840-->
+    <!-- Line -->
+    <polyline
+       points="7875,225 10476,225 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline38" />
+    <!-- Arrowhead on XXXpoint 7875 225 - 10665 225-->
+    <!-- Line -->
+    <polyline
+       points="7875,1350 9675,1350 9675,675 7971,675 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline42" />
+    <!-- Arrowhead on XXXpoint 9675 675 - 7785 675-->
+    <!-- Line -->
+    <polyline
+       points="7875,2475 9675,2475 9675,4725 10476,4725 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline46" />
+    <!-- Arrowhead on XXXpoint 9675 4725 - 10665 4725-->
+    <!-- Line -->
+    <polyline
+       points="7875,3600 9225,3600 9225,5175 10476,5175 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline50" />
+    <!-- Arrowhead on XXXpoint 9225 5175 - 10665 5175-->
+    <!-- Line -->
+    <polyline
+       points="7875,4725 8775,4725 8775,11475 10476,11475 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline54" />
+    <!-- Arrowhead on XXXpoint 8775 11475 - 10665 11475-->
+    <!-- Line: box -->
+    <rect
+       x="10575"
+       y="4500"
+       width="1800"
+       height="1125"
+       rx="0"
+       style="stroke:#000000;stroke-width:45; stroke-linejoin:miter; stroke-linecap:butt; "
+       id="rect58" />
+    <!-- Line -->
+    <polyline
+       points="11475,9000 11475,10026 "
+       style="stroke:#000000;stroke-width:45.00382345;stroke-linejoin:miter;stroke-linecap:butt;stroke-miterlimit:4;stroke-dasharray:none;marker-end:url(#Arrow1Mend)"
+       id="polyline60" />
+    <!-- Arrowhead on XXXpoint 11475 9000 - 11475 10215-->
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="675"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text64">nxtlist</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="1800"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text66">nxttail[RCU_DONE_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="2925"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text68">nxttail[RCU_WAIT_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="4050"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text70">nxttail[RCU_NEXT_READY_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="225"
+       y="5175"
+       fill="#000000"
+       font-family="Courier"
+       font-style="normal"
+       font-weight="bold"
+       font-size="324"
+       text-anchor="start"
+       id="text72">nxttail[RCU_NEXT_TAIL]</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="675"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text74">CB 1</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="1800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text76">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="7425"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text78">CB 3</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="8550"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text80">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="10800"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text82">CB 4</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="11925"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text84">next</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="4050"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text86">CB 2</text>
+    <!-- Text -->
+    <text
+       xml:space="preserve"
+       x="11475"
+       y="5175"
+       fill="#000000"
+       font-family="Helvetica"
+       font-style="normal"
+       font-weight="normal"
+       font-size="324"
+       text-anchor="middle"
+       id="text88">next</text>
+  </g>
+</svg>
diff --git a/Documentation/RCU/Design/Requirements/2013-08-is-it-dead.png b/Documentation/RCU/Design/Requirements/2013-08-is-it-dead.png
deleted file mode 100644 (file)
index 7496a55..0000000
Binary files a/Documentation/RCU/Design/Requirements/2013-08-is-it-dead.png and /dev/null differ
diff --git a/Documentation/RCU/Design/Requirements/RCUApplicability.svg b/Documentation/RCU/Design/Requirements/RCUApplicability.svg
deleted file mode 100644 (file)
index ebcbeee..0000000
+++ /dev/null
@@ -1,237 +0,0 @@
-<?xml version="1.0" encoding="UTF-8" standalone="no"?>
-<!-- Creator: fig2dev Version 3.2 Patchlevel 5d -->
-
-<!-- CreationDate: Tue Mar  4 18:34:25 2014 -->
-
-<!-- Magnification: 3.000 -->
-
-<svg
-   xmlns:dc="http://purl.org/dc/elements/1.1/"
-   xmlns:cc="http://creativecommons.org/ns#"
-   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
-   xmlns:svg="http://www.w3.org/2000/svg"
-   xmlns="http://www.w3.org/2000/svg"
-   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
-   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
-   width="1089.1382"
-   height="668.21368"
-   viewBox="-2121 -36 14554.634 8876.4061"
-   id="svg2"
-   version="1.1"
-   inkscape:version="0.48.3.1 r9886"
-   sodipodi:docname="RCUApplicability.svg">
-  <metadata
-     id="metadata40">
-    <rdf:RDF>
-      <cc:Work
-         rdf:about="">
-        <dc:format>image/svg+xml</dc:format>
-        <dc:type
-           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
-        <dc:title />
-      </cc:Work>
-    </rdf:RDF>
-  </metadata>
-  <defs
-     id="defs38" />
-  <sodipodi:namedview
-     pagecolor="#ffffff"
-     bordercolor="#666666"
-     borderopacity="1"
-     objecttolerance="10"
-     gridtolerance="10"
-     guidetolerance="10"
-     inkscape:pageopacity="0"
-     inkscape:pageshadow="2"
-     inkscape:window-width="849"
-     inkscape:window-height="639"
-     id="namedview36"
-     showgrid="false"
-     inkscape:zoom="0.51326165"
-     inkscape:cx="544.56912"
-     inkscape:cy="334.10686"
-     inkscape:window-x="149"
-     inkscape:window-y="448"
-     inkscape:window-maximized="0"
-     inkscape:current-layer="g4"
-     fit-margin-top="5"
-     fit-margin-left="5"
-     fit-margin-right="5"
-     fit-margin-bottom="5" />
-  <g
-     style="fill:none;stroke-width:0.025in"
-     id="g4"
-     transform="translate(-2043.6828,14.791398)">
-    <!-- Line: box -->
-    <rect
-       x="0"
-       y="0"
-       width="14400"
-       height="8775"
-       rx="0"
-       style="fill:#ffa1a1;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect6" />
-    <!-- Line: box -->
-    <rect
-       x="1350"
-       y="0"
-       width="11700"
-       height="6075"
-       rx="0"
-       style="fill:#ffff00;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect8" />
-    <!-- Line: box -->
-    <rect
-       x="2700"
-       y="0"
-       width="9000"
-       height="4275"
-       rx="0"
-       style="fill:#00ff00;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect10" />
-    <!-- Line: box -->
-    <rect
-       x="4050"
-       y="0"
-       width="6300"
-       height="2475"
-       rx="0"
-       style="fill:#87cfff;stroke:#000000;stroke-width:21;stroke-linecap:butt;stroke-linejoin:miter"
-       id="rect12" />
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="900"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text14"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3017">Read-Mostly, Stale &amp;</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="1350"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text16"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3019">Inconsistent Data OK</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="1800"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text18"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3021">(RCU Works Great!!!)</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="3825"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text20"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3023">(RCU Works Well)</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="3375"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text22"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3025">Read-Mostly, Need Consistent Data</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="5175"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text24"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3027">Read-Write, Need Consistent Data</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="6975"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text26"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">Update-Mostly, Need Consistent Data</text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="5625"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text28"
-       sodipodi:linespacing="125%"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"><tspan
-         style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-         id="tspan3029">(RCU Might Be OK...)</tspan></text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="7875"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text30"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">(1) Provide Existence Guarantees For Update-Friendly Mechanisms</text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="8325"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text32"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">(2) Provide Wait-Free Read-Side Primitives for Real-Time Use)</text>
-    <!-- Text -->
-    <text
-       xml:space="preserve"
-       x="7200"
-       y="7425"
-       font-style="normal"
-       font-weight="normal"
-       font-size="324"
-       id="text34"
-       style="font-size:427.63009644px;font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;text-align:center;line-height:125%;writing-mode:lr-tb;text-anchor:middle;fill:#000000;font-family:Nimbus Sans L;-inkscape-font-specification:Nimbus Sans L"
-       sodipodi:linespacing="125%">(RCU is Very Unlikely to be the Right Tool For The Job, But it Can:</text>
-  </g>
-</svg>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
index a725f9900ec8962a43dfb888f71f1a2b05efabf0..e7e24b3e86e29e2c721c447c237de890b11e7062 100644 (file)
@@ -1,5 +1,3 @@
-<!-- DO NOT HAND EDIT. -->
-<!-- Instead, edit Documentation/RCU/Design/Requirements/Requirements.htmlx and run 'sh htmlqqz.sh Documentation/RCU/Design/Requirements/Requirements' -->
 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
         "http://www.w3.org/TR/html4/loose.dtd">
         <html>
@@ -65,8 +63,8 @@ All that aside, here are the categories of currently known RCU requirements:
 
 <p>
 This is followed by a <a href="#Summary">summary</a>,
-which is in turn followed by the inevitable
-<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
+although here the answer to each quick quiz immediately follows the quiz.
+Select the big white space with your mouse to see the answer.
 
 <h2><a name="Fundamental Requirements">Fundamental Requirements</a></h2>
 
@@ -153,13 +151,27 @@ Therefore, the outcome:
 </blockquote>
 cannot happen.
 
-<p><a name="Quick Quiz 1"><b>Quick Quiz 1</b>:</a>
-Wait a minute!
-You said that updaters can make useful forward progress concurrently
-with readers, but pre-existing readers will block
-<tt>synchronize_rcu()</tt>!!!
-Just who are you trying to fool???
-<br><a href="#qq1answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Wait a minute!
+       You said that updaters can make useful forward progress concurrently
+       with readers, but pre-existing readers will block
+       <tt>synchronize_rcu()</tt>!!!
+       Just who are you trying to fool???
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       First, if updaters do not wish to be blocked by readers, they can use
+       <tt>call_rcu()</tt> or <tt>kfree_rcu()</tt>, which will
+       be discussed later.
+       Second, even when using <tt>synchronize_rcu()</tt>, the other
+       update-side code does run concurrently with readers, whether
+       pre-existing or not.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 This scenario resembles one of the first uses of RCU in
@@ -210,9 +222,20 @@ to guarantee that <tt>do_something()</tt> never runs concurrently
 with <tt>recovery()</tt>, but with little or no synchronization
 overhead in <tt>do_something_dlm()</tt>.
 
-<p><a name="Quick Quiz 2"><b>Quick Quiz 2</b>:</a>
-Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
-<br><a href="#qq2answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Without that extra grace period, memory reordering could result in
+       <tt>do_something_dlm()</tt> executing <tt>do_something()</tt>
+       concurrently with the last bits of <tt>recovery()</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 In order to avoid fatal problems such as deadlocks,
@@ -332,12 +355,27 @@ It also prevents any number of &ldquo;interesting&rdquo; compiler
 optimizations, for example, the use of <tt>gp</tt> as a scratch
 location immediately preceding the assignment.
 
-<p><a name="Quick Quiz 3"><b>Quick Quiz 3</b>:</a>
-But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
-two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
-from being reordered.
-Can't that also cause problems?
-<br><a href="#qq3answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
+       two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
+       from being reordered.
+       Can't that also cause problems?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       No, it cannot.
+       The readers cannot see either of these two fields until
+       the assignment to <tt>gp</tt>, by which time both fields are
+       fully initialized.
+       So reordering the assignments
+       to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt> cannot possibly
+       cause any problems.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
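+
+<p>
+To make the ordering argument in the preceding quick-quiz answer concrete,
+here is a minimal updater-side sketch of the publish pattern under
+discussion.
+The <tt>struct foo</tt>, <tt>gp_lock</tt>, and <tt>add_gp()</tt> names are
+illustrative and need not match the document's own listings:
+
+<blockquote>
+<pre>
+ 1 struct foo {
+ 2   int a;
+ 3   int b;
+ 4 };
+ 5 struct foo __rcu *gp;
+ 6 DEFINE_SPINLOCK(gp_lock);
+ 7
+ 8 bool add_gp(int a, int b)
+ 9 {
+10   struct foo *p;
+11
+12   p = kmalloc(sizeof(*p), GFP_KERNEL);
+13   if (!p)
+14     return false;
+15   spin_lock(&amp;gp_lock);
+16   if (rcu_access_pointer(gp)) { /* Already published; let caller retry. */
+17     spin_unlock(&amp;gp_lock);
+18     kfree(p);
+19     return false;
+20   }
+21   p-&gt;a = a; /* These two plain assignments may be reordered... */
+22   p-&gt;b = b;
+23   rcu_assign_pointer(gp, p); /* ...but readers see them only after this. */
+24   spin_unlock(&amp;gp_lock);
+25   return true;
+26 }
+</pre>
+</blockquote>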
 
 <p>
 It is tempting to assume that the reader need not do anything special
@@ -494,11 +532,42 @@ The <tt>rcu_access_pointer()</tt> on line&nbsp;6 is similar to
        code protected by the corresponding update-side lock.
 </ol>
 
-<p><a name="Quick Quiz 4"><b>Quick Quiz 4</b>:</a>
-Without the <tt>rcu_dereference()</tt> or the
-<tt>rcu_access_pointer()</tt>, what destructive optimizations
-might the compiler make use of?
-<br><a href="#qq4answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Without the <tt>rcu_dereference()</tt> or the
+       <tt>rcu_access_pointer()</tt>, what destructive optimizations
+       might the compiler make use of?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Let's start with what happens to <tt>do_something_gp()</tt>
+       if it fails to use <tt>rcu_dereference()</tt>.
+       It could reuse a value formerly fetched from this same pointer.
+       It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
+       manner, resulting in <i>load tearing</i>, in turn resulting in a
+       bytewise mash-up of two distinct pointer values.
+       It might even use value-speculation optimizations, where it makes
+       a wrong guess, but by the time it gets around to checking the
+       value, an update has changed the pointer to match the wrong guess.
+       Too bad about any dereferences that returned pre-initialization garbage
+       in the meantime!
+       </font>
+
+       <p><font color="ffffff">
+       For <tt>remove_gp_synchronous()</tt>, as long as all modifications
+       to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
+       the above optimizations are harmless.
+       However,
+       with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt>,
+       <tt>sparse</tt> will complain if you
+       define <tt>gp</tt> with <tt>__rcu</tt> and then
+       access it without using
+       either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
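+
+<p>
+The destructive optimizations described in the answer above are exactly
+what <tt>rcu_dereference()</tt> is intended to suppress.
+A minimal reader-side sketch along the lines of
+<tt>do_something_gp()</tt> (the field accesses and the
+<tt>do_something()</tt> call are illustrative, not the document's own
+listing) might look as follows:
+
+<blockquote>
+<pre>
+ 1 bool do_something_gp_sketch(void)
+ 2 {
+ 3   struct foo *p;
+ 4
+ 5   rcu_read_lock();
+ 6   p = rcu_dereference(gp); /* Subscribe: no refetching, no load tearing. */
+ 7   if (p) {
+ 8     do_something(p-&gt;a, p-&gt;b); /* Hypothetical consumer of the fields. */
+ 9     rcu_read_unlock();
+10     return true;
+11   }
+12   rcu_read_unlock();
+13   return false;
+14 }
+</pre>
+</blockquote>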
 
 <p>
 In short, RCU's publish-subscribe guarantee is provided by the combination
@@ -571,17 +640,156 @@ systems with more than one CPU:
        <tt>synchronize_rcu()</tt> migrates in the meantime.
 </ol>
 
-<p><a name="Quick Quiz 5"><b>Quick Quiz 5</b>:</a>
-Given that multiple CPUs can start RCU read-side critical sections
-at any time without any ordering whatsoever, how can RCU possibly tell whether
-or not a given RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>?
-<br><a href="#qq5answer">Answer</a>
-
-<p><a name="Quick Quiz 6"><b>Quick Quiz 6</b>:</a>
-The first and second guarantees require unbelievably strict ordering!
-Are all these memory barriers <i> really</i> required?
-<br><a href="#qq6answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Given that multiple CPUs can start RCU read-side critical sections
+       at any time without any ordering whatsoever, how can RCU possibly
+       tell whether or not a given RCU read-side critical section starts
+       before a given instance of <tt>synchronize_rcu()</tt>?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       If RCU cannot tell whether or not a given
+       RCU read-side critical section starts before a
+       given instance of <tt>synchronize_rcu()</tt>,
+       then it must assume that the RCU read-side critical section
+       started first.
+       In other words, a given instance of <tt>synchronize_rcu()</tt>
+       can avoid waiting on a given RCU read-side critical section only
+       if it can prove that <tt>synchronize_rcu()</tt> started first.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       The first and second guarantees require unbelievably strict ordering!
+       Are all these memory barriers <i> really</i> required?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Yes, they really are required.
+       To see why the first guarantee is required, consider the following
+       sequence of events:
+       </font>
+
+       <ol>
+       <li>    <font color="ffffff">
+               CPU 1: <tt>rcu_read_lock()</tt>
+               </font>
+       <li>    <font color="ffffff">
+               CPU 1: <tt>q = rcu_dereference(gp);
+               /* Very likely to return p. */</tt>
+               </font>
+       <li>    <font color="ffffff">
+               CPU 0: <tt>list_del_rcu(p);</tt>
+               </font>
+       <li>    <font color="ffffff">
+               CPU 0: <tt>synchronize_rcu()</tt> starts.
+               </font>
+       <li>    <font color="ffffff">
+               CPU 1: <tt>do_something_with(q-&gt;a);
+               /* No smp_mb(), so might happen after kfree(). */</tt>
+               </font>
+       <li>    <font color="ffffff">
+               CPU 1: <tt>rcu_read_unlock()</tt>
+               </font>
+       <li>    <font color="ffffff">
+               CPU 0: <tt>synchronize_rcu()</tt> returns.
+               </font>
+       <li>    <font color="ffffff">
+               CPU 0: <tt>kfree(p);</tt>
+               </font>
+       </ol>
+
+       <p><font color="ffffff">
+       Therefore, there absolutely must be a full memory barrier between the
+       end of the RCU read-side critical section and the end of the
+       grace period.
+       </font>
+
+       <p><font color="ffffff">
+       The sequence of events demonstrating the necessity of the second rule
+       is roughly similar:
+       </font>
+
+       <ol>
+       <li>    <font color="ffffff">CPU 0: <tt>list_del_rcu(p);</tt>
+               </font>
+       <li>    <font color="ffffff">CPU 0: <tt>synchronize_rcu()</tt> starts.
+               </font>
+       <li>    <font color="ffffff">CPU 1: <tt>rcu_read_lock()</tt>
+               </font>
+       <li>    <font color="ffffff">CPU 1: <tt>q = rcu_dereference(gp);
+               /* Might return p if no memory barrier. */</tt>
+               </font>
+       <li>    <font color="ffffff">CPU 0: <tt>synchronize_rcu()</tt> returns.
+               </font>
+       <li>    <font color="ffffff">CPU 0: <tt>kfree(p);</tt>
+               </font>
+       <li>    <font color="ffffff">
+               CPU 1: <tt>do_something_with(q-&gt;a); /* Boom!!! */</tt>
+               </font>
+       <li>    <font color="ffffff">CPU 1: <tt>rcu_read_unlock()</tt>
+               </font>
+       </ol>
+
+       <p><font color="ffffff">
+       And similarly, without a memory barrier between the beginning of the
+       grace period and the beginning of the RCU read-side critical section,
+       CPU&nbsp;1 might end up accessing the freelist.
+       </font>
+
+       <p><font color="ffffff">
+       The &ldquo;as if&rdquo; rule of course applies, so that any
+       implementation that acts as if the appropriate memory barriers
+       were in place is a correct implementation.
+       That said, it is much easier to fool yourself into believing
+       that you have adhered to the as-if rule than it is to actually
+       adhere to it!
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       You claim that <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+       generate absolutely no code in some kernel builds.
+       This means that the compiler might arbitrarily rearrange consecutive
+       RCU read-side critical sections.
+       Given such rearrangement, if a given RCU read-side critical section
+       is done, how can you be sure that all prior RCU read-side critical
+       sections are done?
+       Won't the compiler rearrangements make that impossible to determine?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       In cases where <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+       generate absolutely no code, RCU infers quiescent states only at
+       special locations, for example, within the scheduler.
+       Because calls to <tt>schedule()</tt> had better prevent calling-code
+       accesses to shared variables from being rearranged across the call to
+       <tt>schedule()</tt>, if RCU detects the end of a given RCU read-side
+       critical section, it will necessarily detect the end of all prior
+       RCU read-side critical sections, no matter how aggressively the
+       compiler scrambles the code.
+       </font>
+
+       <p><font color="ffffff">
+       Again, this all assumes that the compiler cannot scramble code across
+       calls to the scheduler, out of interrupt handlers, into the idle loop,
+       into user-mode code, and so on.
+       But if your kernel build allows that sort of scrambling, you have broken
+       far more than just RCU!
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 Note that these memory-barrier requirements do not replace the fundamental
@@ -626,9 +834,19 @@ inconvenience can be avoided through use of the
 <tt>call_rcu()</tt> and <tt>kfree_rcu()</tt> API members
 described later in this document.
 
-<p><a name="Quick Quiz 7"><b>Quick Quiz 7</b>:</a>
-But how does the upgrade-to-write operation exclude other readers?
-<br><a href="#qq7answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       But how does the upgrade-to-write operation exclude other readers?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       It doesn't, just like normal RCU updates, which also do not exclude
+       RCU readers.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
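+
+<p>
+As a hedged illustration of the read-to-write upgrade just discussed
+(the names and the in-place update are purely illustrative), a reader
+may acquire the update-side lock from within its read-side critical
+section, keeping in mind that doing so does not exclude other readers:
+
+<blockquote>
+<pre>
+ 1 void upgrade_and_update(int new_a)
+ 2 {
+ 3   struct foo *p;
+ 4
+ 5   rcu_read_lock();
+ 6   p = rcu_dereference(gp);
+ 7   if (p &amp;&amp; READ_ONCE(p-&gt;a) != new_a) {
+ 8     spin_lock(&amp;gp_lock); /* Read-to-write upgrade. */
+ 9     if (p == rcu_access_pointer(gp)) /* Still the published element? */
+10       WRITE_ONCE(p-&gt;a, new_a); /* Other readers are not excluded. */
+11     spin_unlock(&amp;gp_lock);
+12   }
+13   rcu_read_unlock();
+14 }
+</pre>
+</blockquote>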
 
 <p>
 This guarantee allows lookup code to be shared between read-side
@@ -714,9 +932,20 @@ to do significant reordering.
 This is by design:  Any significant ordering constraints would slow down
 these fast-path APIs.
 
-<p><a name="Quick Quiz 8"><b>Quick Quiz 8</b>:</a>
-Can't the compiler also reorder this code?
-<br><a href="#qq8answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Can't the compiler also reorder this code?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       No, the volatile casts in <tt>READ_ONCE()</tt> and
+       <tt>WRITE_ONCE()</tt> prevent the compiler from reordering in
+       this particular case.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h3><a name="Readers Do Not Exclude Updaters">Readers Do Not Exclude Updaters</a></h3>
 
@@ -769,10 +998,28 @@ new readers can start immediately after <tt>synchronize_rcu()</tt>
 starts, and <tt>synchronize_rcu()</tt> is under no
 obligation to wait for these new readers.
 
-<p><a name="Quick Quiz 9"><b>Quick Quiz 9</b>:</a>
-Suppose that synchronize_rcu() did wait until all readers had completed.
-Would the updater be able to rely on this?
-<br><a href="#qq9answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Suppose that <tt>synchronize_rcu()</tt> did wait until <i>all</i>
+       readers had completed instead of waiting only on
+       pre-existing readers.
+       For how long would the updater be able to rely on there
+       being no readers?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       For no time at all.
+       Even if <tt>synchronize_rcu()</tt> were to wait until
+       all readers had completed, a new reader might start immediately after
+       <tt>synchronize_rcu()</tt> completed.
+       Therefore, the code following
+       <tt>synchronize_rcu()</tt> can <i>never</i> rely on there being
+       no readers.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h3><a name="Grace Periods Don't Partition Read-Side Critical Sections">
 Grace Periods Don't Partition Read-Side Critical Sections</a></h3>
@@ -969,11 +1216,24 @@ grace period.
 As a result, an RCU read-side critical section cannot partition a pair
 of RCU grace periods.
 
-<p><a name="Quick Quiz 10"><b>Quick Quiz 10</b>:</a>
-How long a sequence of grace periods, each separated by an RCU read-side
-critical section, would be required to partition the RCU read-side
-critical sections at the beginning and end of the chain?
-<br><a href="#qq10answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       How long a sequence of grace periods, each separated by an RCU
+       read-side critical section, would be required to partition the RCU
+       read-side critical sections at the beginning and end of the chain?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       In theory, an infinite number.
+       In practice, an unknown number that is sensitive to both implementation
+       details and timing considerations.
+       Therefore, even in practice, RCU users must abide by the
+       theoretical rather than the practical answer.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <h3><a name="Disabling Preemption Does Not Block Grace Periods">
 Disabling Preemption Does Not Block Grace Periods</a></h3>
@@ -1109,12 +1369,27 @@ These classes is covered in the following sections.
 <h3><a name="Specialization">Specialization</a></h3>
 
 <p>
-RCU is and always has been intended primarily for read-mostly situations, as
-illustrated by the following figure.
-This means that RCU's read-side primitives are optimized, often at the
+RCU is and always has been intended primarily for read-mostly situations,
+which means that RCU's read-side primitives are optimized, often at the
 expense of its update-side primitives.
+Experience thus far is captured by the following list of situations:
 
-<p><img src="RCUApplicability.svg" alt="RCUApplicability.svg" width="70%"></p>
+<ol>
+<li>   Read-mostly data, where stale and inconsistent data is not
+       a problem:   RCU works great!
+<li>   Read-mostly data, where data must be consistent:
+       RCU works well.
+<li>   Read-write data, where data must be consistent:
+       RCU <i>might</i> work OK.
+       Or not.
+<li>   Write-mostly data, where data must be consistent:
+       RCU is very unlikely to be the right tool for the job,
+       with the following exceptions, where RCU can provide:
+       <ol type=a>
+       <li>    Existence guarantees for update-friendly mechanisms.
+       <li>    Wait-free read-side primitives for real-time use.
+       </ol>
+</ol>
 
 <p>
 This focus on read-mostly situations means that RCU must interoperate
@@ -1127,9 +1402,43 @@ synchronization primitives be legal within RCU read-side critical sections,
 including spinlocks, sequence locks, atomic operations, reference
 counters, and memory barriers.
 
-<p><a name="Quick Quiz 11"><b>Quick Quiz 11</b>:</a>
-What about sleeping locks?
-<br><a href="#qq11answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       What about sleeping locks?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       These are forbidden within Linux-kernel RCU read-side critical
+       sections because it is not legal to place a quiescent state
+       (in this case, voluntary context switch) within an RCU read-side
+       critical section.
+       However, sleeping locks may be used within userspace RCU read-side
+       critical sections, and also within Linux-kernel sleepable RCU
+       <a href="#Sleepable RCU"><font color="ffffff">(SRCU)</font></a>
+       read-side critical sections.
+       In addition, the -rt patchset turns spinlocks into
+       sleeping locks so that the corresponding critical sections
+       can be preempted, which also means that these sleeplockified
+       spinlocks (but not other sleeping locks!) may be acquired within
+       -rt-Linux-kernel RCU read-side critical sections.
+       </font>
+
+       <p><font color="ffffff">
+       Note that it <i>is</i> legal for a normal RCU read-side
+       critical section to conditionally acquire a sleeping lock
+       (as in <tt>mutex_trylock()</tt>), but only as long as it does
+       not loop indefinitely attempting to conditionally acquire that
+       sleeping lock.
+       The key point is that things like <tt>mutex_trylock()</tt>
+       either return with the mutex held, or return an error indication if
+       the mutex was not immediately available.
+       Either way, <tt>mutex_trylock()</tt> returns immediately without
+       sleeping.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
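+
+<p>
+A short sketch of the conditional-acquisition pattern mentioned in the
+answer above (the <tt>update_mutex</tt> and field names are illustrative):
+the read-side critical section may <i>attempt</i> the mutex, but must not
+loop waiting for it:
+
+<blockquote>
+<pre>
+ 1 bool try_update_under_reader(struct foo *p, int new_b)
+ 2 {
+ 3   bool updated = false;
+ 4
+ 5   rcu_read_lock();
+ 6   if (mutex_trylock(&amp;update_mutex)) { /* Never sleeps, so OK here. */
+ 7     WRITE_ONCE(p-&gt;b, new_b);
+ 8     mutex_unlock(&amp;update_mutex);
+ 9     updated = true;
+10   }
+11   rcu_read_unlock();
+12   return updated; /* On failure, the caller decides what to do next. */
+13 }
+</pre>
+</blockquote>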
 
 <p>
 It often comes as a surprise that many algorithms do not require a
@@ -1160,10 +1469,7 @@ some period of time, so the exact wait period is a judgment call.
 One of our pair of veternarians might wait 30 seconds before pronouncing
 the cat dead, while the other might insist on waiting a full minute.
 The two veternarians would then disagree on the state of the cat during
-the final 30 seconds of the minute following the last heartbeat, as
-fancifully illustrated below:
-
-<p><img src="2013-08-is-it-dead.png" alt="2013-08-is-it-dead.png" width="431"></p>
+the final 30 seconds of the minute following the last heartbeat.
 
 <p>
 Interestingly enough, this same situation applies to hardware.
@@ -1343,7 +1649,8 @@ situations where neither <tt>synchronize_rcu()</tt> nor
 <tt>synchronize_rcu_expedited()</tt> would be legal,
 including within preempt-disable code, <tt>local_bh_disable()</tt> code,
 interrupt-disable code, and interrupt handlers.
-However, even <tt>call_rcu()</tt> is illegal within NMI handlers.
+However, even <tt>call_rcu()</tt> is illegal within NMI handlers
+and from idle and offline CPUs.
 The callback function (<tt>remove_gp_cb()</tt> in this case) will be
 executed within softirq (software interrupt) environment within the
 Linux kernel,
@@ -1354,12 +1661,27 @@ write an RCU callback function that takes too long.
 Long-running operations should be relegated to separate threads or
 (in the Linux kernel) workqueues.
 
-<p><a name="Quick Quiz 12"><b>Quick Quiz 12</b>:</a>
-Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
-After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
-structure, which would interact badly with concurrent insertions.
-Doesn't this mean that <tt>rcu_dereference()</tt> is required?
-<br><a href="#qq12answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
+       After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
+       structure, which would interact badly with concurrent insertions.
+       Doesn't this mean that <tt>rcu_dereference()</tt> is required?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       Presumably the <tt>-&gt;gp_lock</tt> acquired on line&nbsp;18 excludes
+       any changes, including any insertions that <tt>rcu_dereference()</tt>
+       would protect against.
+       Therefore, any insertions will be delayed until after
+       <tt>-&gt;gp_lock</tt>
+       is released on line&nbsp;25, which in turn means that
+       <tt>rcu_access_pointer()</tt> suffices.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
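+
+<p>
+For concreteness, here is a condensed sketch of the removal pattern that
+the answer above refers to, assuming that the element carries an
+<tt>rcu_head</tt> field named <tt>rh</tt> and that <tt>remove_gp_cb()</tt>
+frees the element; the exact names and line numbers of the document's own
+listing may differ:
+
+<blockquote>
+<pre>
+ 1 bool remove_gp_sketch(void)
+ 2 {
+ 3   struct foo *p;
+ 4
+ 5   spin_lock(&amp;gp_lock);
+ 6   p = rcu_access_pointer(gp); /* The lock excludes changes to gp. */
+ 7   if (!p) {
+ 8     spin_unlock(&amp;gp_lock);
+ 9     return false;
+10   }
+11   rcu_assign_pointer(gp, NULL);
+12   call_rcu(&amp;p-&gt;rh, remove_gp_cb); /* remove_gp_cb() frees p later. */
+13   spin_unlock(&amp;gp_lock);
+14   return true;
+15 }
+</pre>
+</blockquote>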
 
 <p>
 However, all that <tt>remove_gp_cb()</tt> is doing is
@@ -1406,14 +1728,31 @@ This was due to the fact that RCU was not heavily used within DYNIX/ptx,
 so the very few places that needed something like
 <tt>synchronize_rcu()</tt> simply open-coded it.
 
-<p><a name="Quick Quiz 13"><b>Quick Quiz 13</b>:</a>
-Earlier it was claimed that <tt>call_rcu()</tt> and
-<tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
-by readers.
-But how can that be correct, given that the invocation of the callback
-and the freeing of the memory (respectively) must still wait for
-a grace period to elapse?
-<br><a href="#qq13answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       Earlier it was claimed that <tt>call_rcu()</tt> and
+       <tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
+       by readers.
+       But how can that be correct, given that the invocation of the callback
+       and the freeing of the memory (respectively) must still wait for
+       a grace period to elapse?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       We could define things this way, but keep in mind that this sort of
+       definition would say that updates in garbage-collected languages
+       cannot complete until the next time the garbage collector runs,
+       which does not seem at all reasonable.
+       The key point is that in most cases, an updater using either
+       <tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> can proceed to the
+       next update as soon as it has invoked <tt>call_rcu()</tt> or
+       <tt>kfree_rcu()</tt>, without having to wait for a subsequent
+       grace period.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 But what if the updater must wait for the completion of code to be
@@ -1838,11 +2177,26 @@ kthreads to be spawned.
 Therefore, invoking <tt>synchronize_rcu()</tt> during scheduler
 initialization can result in deadlock.
 
-<p><a name="Quick Quiz 14"><b>Quick Quiz 14</b>:</a>
-So what happens with <tt>synchronize_rcu()</tt> during
-scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
-kernels?
-<br><a href="#qq14answer">Answer</a>
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       So what happens with <tt>synchronize_rcu()</tt> during
+       scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
+       kernels?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       In <tt>CONFIG_PREEMPT=n</tt> kernels, <tt>synchronize_rcu()</tt>
+       maps directly to <tt>synchronize_sched()</tt>.
+       Therefore, <tt>synchronize_rcu()</tt> works normally throughout
+       boot in <tt>CONFIG_PREEMPT=n</tt> kernels.
+       However, your code must also work in <tt>CONFIG_PREEMPT=y</tt> kernels,
+       so it is still necessary to avoid invoking <tt>synchronize_rcu()</tt>
+       during scheduler initialization.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
 
 <p>
 I learned of these boot-time requirements as a result of a series of
@@ -2170,6 +2524,14 @@ up to and including systems with 4096 CPUs.
 This real-time requirement motivated the grace-period kthread, which
 also simplified handling of a number of race conditions.
 
+<p>
+RCU must avoid degrading real-time response for CPU-bound threads, whether
+executing in usermode (which is one use case for
+<tt>CONFIG_NO_HZ_FULL=y</tt>) or in the kernel.
+That said, CPU-bound loops in the kernel must execute
+<tt>cond_resched_rcu_qs()</tt> at least once every few tens of milliseconds
+in order to avoid receiving an IPI from RCU.
+
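+<p>
+As a hedged sketch (the loop body, <tt>nr_items</tt>, and
+<tt>process_item()</tt> are illustrative), a CPU-bound loop in the kernel
+can report quiescent states periodically as follows:
+
+<blockquote>
+<pre>
+ 1 static void scan_all_items(void)
+ 2 {
+ 3   long i;
+ 4
+ 5   for (i = 0; i &lt; nr_items; i++) {
+ 6     process_item(i); /* Hypothetical per-item work. */
+ 7     if ((i % 1024) == 0)
+ 8       cond_resched_rcu_qs(); /* Report a quiescent state now and then. */
+ 9   }
+10 }
+</pre>
+</blockquote>
+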
 <p>
 Finally, RCU's status as a synchronization primitive means that
 any RCU failure can result in arbitrary memory corruption that can be
@@ -2223,6 +2585,8 @@ described in a separate section.
 <li>   <a href="#Sched Flavor">Sched Flavor</a>
 <li>   <a href="#Sleepable RCU">Sleepable RCU</a>
 <li>   <a href="#Tasks RCU">Tasks RCU</a>
+<li>   <a href="#Waiting for Multiple Grace Periods">
+       Waiting for Multiple Grace Periods</a>
 </ol>
 
 <h3><a name="Bottom-Half Flavor">Bottom-Half Flavor</a></h3>
@@ -2472,6 +2836,94 @@ The tasks-RCU API is quite compact, consisting only of
 <tt>synchronize_rcu_tasks()</tt>, and
 <tt>rcu_barrier_tasks()</tt>.
 
+<h3><a name="Waiting for Multiple Grace Periods">
+Waiting for Multiple Grace Periods</a></h3>
+
+<p>
+Perhaps you have an RCU-protected data structure that is accessed from
+RCU read-side critical sections, from softirq handlers, and from
+hardware interrupt handlers.
+That is three flavors of RCU: the normal flavor, the bottom-half flavor,
+and the sched flavor.
+How can you wait for a compound grace period?
+
+<p>
+The best approach is usually to &ldquo;just say no!&rdquo; and
+insert <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>
+around each RCU read-side critical section, regardless of what
+environment it happens to be in.
+But suppose that some of the RCU read-side critical sections are
+on extremely hot code paths, and that use of <tt>CONFIG_PREEMPT=n</tt>
+is not a viable option, so that <tt>rcu_read_lock()</tt> and
+<tt>rcu_read_unlock()</tt> are not free.
+What then?
+
+<p>
+You <i>could</i> wait on all three grace periods in succession, as follows:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu();
+ 2 synchronize_rcu_bh();
+ 3 synchronize_sched();
+</pre>
+</blockquote>
+
+<p>
+This works, but triples the update-side latency penalty.
+In cases where this is not acceptable, <tt>synchronize_rcu_mult()</tt>
+may be used to wait on all three flavors of grace period concurrently:
+
+<blockquote>
+<pre>
+ 1 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched);
+</pre>
+</blockquote>
+
+<p>
+But what if it is necessary to also wait on SRCU?
+This can be done as follows:
+
+<blockquote>
+<pre>
+ 1 static void call_my_srcu(struct rcu_head *head,
+ 2        void (*func)(struct rcu_head *head))
+ 3 {
+ 4   call_srcu(&amp;my_srcu, head, func);
+ 5 }
+ 6
+ 7 synchronize_rcu_mult(call_rcu, call_rcu_bh, call_rcu_sched, call_my_srcu);
+</pre>
+</blockquote>
+
+<p>
+If you needed to wait on multiple different flavors of SRCU
+(but why???), you would need to create a wrapper function resembling
+<tt>call_my_srcu()</tt> for each SRCU flavor.
+
+<table>
+<tr><th>&nbsp;</th></tr>
+<tr><th align="left">Quick Quiz:</th></tr>
+<tr><td>
+       But what if I need to wait for multiple RCU flavors, but I also need
+       the grace periods to be expedited?
+</td></tr>
+<tr><th align="left">Answer:</th></tr>
+<tr><td bgcolor="#ffffff"><font color="ffffff">
+       If you are using expedited grace periods, there should be less penalty
+       for waiting on them in succession.
+       But if that is nevertheless a problem, you can use workqueues
+       or multiple kthreads to wait on the various expedited grace
+       periods concurrently.
+</font></td></tr>
+<tr><td>&nbsp;</td></tr>
+</table>
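+
+<p>
+One possible shape of the workqueue approach mentioned in the answer above
+(two flavors shown; the work items and function names are illustrative,
+and additional flavors can be waited on the same way):
+
+<blockquote>
+<pre>
+ 1 static void wait_rcu_x(struct work_struct *unused)
+ 2 {
+ 3   synchronize_rcu_expedited();
+ 4 }
+ 5 static void wait_sched_x(struct work_struct *unused)
+ 6 {
+ 7   synchronize_sched_expedited();
+ 8 }
+ 9 static DECLARE_WORK(rcu_x_work, wait_rcu_x);
+10 static DECLARE_WORK(sched_x_work, wait_sched_x);
+11
+12 void wait_both_expedited(void)
+13 {
+14   schedule_work(&amp;rcu_x_work);
+15   schedule_work(&amp;sched_x_work);
+16   flush_work(&amp;rcu_x_work);   /* The two expedited waits can proceed... */
+17   flush_work(&amp;sched_x_work); /* ...concurrently rather than back to back. */
+18 }
+</pre>
+</blockquote>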
+
+<p>
+Again, it is usually better to adjust the RCU read-side critical sections
+to use a single flavor of RCU, but when this is not feasible, you can use
+<tt>synchronize_rcu_mult()</tt>.
+
 <h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
 
 <p>
@@ -2569,329 +3021,4 @@ and is provided
 under the terms of the Creative Commons Attribution-Share Alike 3.0
 United States license.
 
-<h3><a name="Answers to Quick Quizzes">
-Answers to Quick Quizzes</a></h3>
-
-<a name="qq1answer"></a>
-<p><b>Quick Quiz 1</b>:
-Wait a minute!
-You said that updaters can make useful forward progress concurrently
-with readers, but pre-existing readers will block
-<tt>synchronize_rcu()</tt>!!!
-Just who are you trying to fool???
-
-
-</p><p><b>Answer</b>:
-First, if updaters do not wish to be blocked by readers, they can use
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt>, which will
-be discussed later.
-Second, even when using <tt>synchronize_rcu()</tt>, the other
-update-side code does run concurrently with readers, whether pre-existing
-or not.
-
-
-</p><p><a href="#Quick%20Quiz%201"><b>Back to Quick Quiz 1</b>.</a>
-
-<a name="qq2answer"></a>
-<p><b>Quick Quiz 2</b>:
-Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
-
-
-</p><p><b>Answer</b>:
-Without that extra grace period, memory reordering could result in
-<tt>do_something_dlm()</tt> executing <tt>do_something()</tt>
-concurrently with the last bits of <tt>recovery()</tt>.
-
-
-</p><p><a href="#Quick%20Quiz%202"><b>Back to Quick Quiz 2</b>.</a>
-
-<a name="qq3answer"></a>
-<p><b>Quick Quiz 3</b>:
-But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
-two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
-from being reordered.
-Can't that also cause problems?
-
-
-</p><p><b>Answer</b>:
-No, it cannot.
-The readers cannot see either of these two fields until
-the assignment to <tt>gp</tt>, by which time both fields are
-fully initialized.
-So reordering the assignments
-to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt> cannot possibly
-cause any problems.
-
-
-</p><p><a href="#Quick%20Quiz%203"><b>Back to Quick Quiz 3</b>.</a>
-
-<a name="qq4answer"></a>
-<p><b>Quick Quiz 4</b>:
-Without the <tt>rcu_dereference()</tt> or the
-<tt>rcu_access_pointer()</tt>, what destructive optimizations
-might the compiler make use of?
-
-
-</p><p><b>Answer</b>:
-Let's start with what happens to <tt>do_something_gp()</tt>
-if it fails to use <tt>rcu_dereference()</tt>.
-It could reuse a value formerly fetched from this same pointer.
-It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
-manner, resulting in <i>load tearing</i>, in turn resulting a bytewise
-mash-up of two distince pointer values.
-It might even use value-speculation optimizations, where it makes a wrong
-guess, but by the time it gets around to checking the value, an update
-has changed the pointer to match the wrong guess.
-Too bad about any dereferences that returned pre-initialization garbage
-in the meantime!
-
-<p>
-For <tt>remove_gp_synchronous()</tt>, as long as all modifications
-to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
-the above optimizations are harmless.
-However,
-with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt>,
-<tt>sparse</tt> will complain if you
-define <tt>gp</tt> with <tt>__rcu</tt> and then
-access it without using
-either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
-
-
-</p><p><a href="#Quick%20Quiz%204"><b>Back to Quick Quiz 4</b>.</a>
-
-<a name="qq5answer"></a>
-<p><b>Quick Quiz 5</b>:
-Given that multiple CPUs can start RCU read-side critical sections
-at any time without any ordering whatsoever, how can RCU possibly tell whether
-or not a given RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>?
-
-
-</p><p><b>Answer</b>:
-If RCU cannot tell whether or not a given
-RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>,
-then it must assume that the RCU read-side critical section
-started first.
-In other words, a given instance of <tt>synchronize_rcu()</tt>
-can avoid waiting on a given RCU read-side critical section only
-if it can prove that <tt>synchronize_rcu()</tt> started first.
-
-
-</p><p><a href="#Quick%20Quiz%205"><b>Back to Quick Quiz 5</b>.</a>
-
-<a name="qq6answer"></a>
-<p><b>Quick Quiz 6</b>:
-The first and second guarantees require unbelievably strict ordering!
-Are all these memory barriers <i> really</i> required?
-
-
-</p><p><b>Answer</b>:
-Yes, they really are required.
-To see why the first guarantee is required, consider the following
-sequence of events:
-
-<ol>
-<li>   CPU 1: <tt>rcu_read_lock()</tt>
-<li>   CPU 1: <tt>q = rcu_dereference(gp);
-       /* Very likely to return p. */</tt>
-<li>   CPU 0: <tt>list_del_rcu(p);</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>   CPU 1: <tt>do_something_with(q-&gt;a);
-       /* No smp_mb(), so might happen after kfree(). */</tt>
-<li>   CPU 1: <tt>rcu_read_unlock()</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>   CPU 0: <tt>kfree(p);</tt>
-</ol>
-
-<p>
-Therefore, there absolutely must be a full memory barrier between the
-end of the RCU read-side critical section and the end of the
-grace period.
-
-<p>
-The sequence of events demonstrating the necessity of the second rule
-is roughly similar:
-
-<ol>
-<li>   CPU 0: <tt>list_del_rcu(p);</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>   CPU 1: <tt>rcu_read_lock()</tt>
-<li>   CPU 1: <tt>q = rcu_dereference(gp);
-       /* Might return p if no memory barrier. */</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>   CPU 0: <tt>kfree(p);</tt>
-<li>   CPU 1: <tt>do_something_with(q-&gt;a); /* Boom!!! */</tt>
-<li>   CPU 1: <tt>rcu_read_unlock()</tt>
-</ol>
-
-<p>
-And similarly, without a memory barrier between the beginning of the
-grace period and the beginning of the RCU read-side critical section,
-CPU&nbsp;1 might end up accessing the freelist.
-
-<p>
-The &ldquo;as if&rdquo; rule of course applies, so that any implementation
-that acts as if the appropriate memory barriers were in place is a
-correct implementation.
-That said, it is much easier to fool yourself into believing that you have
-adhered to the as-if rule than it is to actually adhere to it!
-
-
-</p><p><a href="#Quick%20Quiz%206"><b>Back to Quick Quiz 6</b>.</a>
-
-<a name="qq7answer"></a>
-<p><b>Quick Quiz 7</b>:
-But how does the upgrade-to-write operation exclude other readers?
-
-
-</p><p><b>Answer</b>:
-It doesn't, just like normal RCU updates, which also do not exclude
-RCU readers.
-
-
-</p><p><a href="#Quick%20Quiz%207"><b>Back to Quick Quiz 7</b>.</a>
-
-<a name="qq8answer"></a>
-<p><b>Quick Quiz 8</b>:
-Can't the compiler also reorder this code?
-
-
-</p><p><b>Answer</b>:
-No, the volatile casts in <tt>READ_ONCE()</tt> and
-<tt>WRITE_ONCE()</tt> prevent the compiler from reordering in
-this particular case.
-
-
-</p><p><a href="#Quick%20Quiz%208"><b>Back to Quick Quiz 8</b>.</a>
-
-<a name="qq9answer"></a>
-<p><b>Quick Quiz 9</b>:
-Suppose that synchronize_rcu() did wait until all readers had completed.
-Would the updater be able to rely on this?
-
-
-</p><p><b>Answer</b>:
-No.
-Even if <tt>synchronize_rcu()</tt> were to wait until
-all readers had completed, a new reader might start immediately after
-<tt>synchronize_rcu()</tt> completed.
-Therefore, the code following
-<tt>synchronize_rcu()</tt> cannot rely on there being no readers
-in any case.
-
-
-</p><p><a href="#Quick%20Quiz%209"><b>Back to Quick Quiz 9</b>.</a>
-
-<a name="qq10answer"></a>
-<p><b>Quick Quiz 10</b>:
-How long a sequence of grace periods, each separated by an RCU read-side
-critical section, would be required to partition the RCU read-side
-critical sections at the beginning and end of the chain?
-
-
-</p><p><b>Answer</b>:
-In theory, an infinite number.
-In practice, an unknown number that is sensitive to both implementation
-details and timing considerations.
-Therefore, even in practice, RCU users must abide by the theoretical rather
-than the practical answer.
-
-
-</p><p><a href="#Quick%20Quiz%2010"><b>Back to Quick Quiz 10</b>.</a>
-
-<a name="qq11answer"></a>
-<p><b>Quick Quiz 11</b>:
-What about sleeping locks?
-
-
-</p><p><b>Answer</b>:
-These are forbidden within Linux-kernel RCU read-side critical sections
-because it is not legal to place a quiescent state (in this case,
-voluntary context switch) within an RCU read-side critical section.
-However, sleeping locks may be used within userspace RCU read-side critical
-sections, and also within Linux-kernel sleepable RCU
-<a href="#Sleepable RCU">(SRCU)</a>
-read-side critical sections.
-In addition, the -rt patchset turns spinlocks into a sleeping locks so
-that the corresponding critical sections can be preempted, which
-also means that these sleeplockified spinlocks (but not other sleeping locks!)
-may be acquire within -rt-Linux-kernel RCU read-side critical sections.
-
-<p>
-Note that it <i>is</i> legal for a normal RCU read-side critical section
-to conditionally acquire a sleeping locks (as in <tt>mutex_trylock()</tt>),
-but only as long as it does not loop indefinitely attempting to
-conditionally acquire that sleeping locks.
-The key point is that things like <tt>mutex_trylock()</tt>
-either return with the mutex held, or return an error indication if
-the mutex was not immediately available.
-Either way, <tt>mutex_trylock()</tt> returns immediately without sleeping.
-
-
-</p><p><a href="#Quick%20Quiz%2011"><b>Back to Quick Quiz 11</b>.</a>
-
-<a name="qq12answer"></a>
-<p><b>Quick Quiz 12</b>:
-Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
-After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
-structure, which would interact badly with concurrent insertions.
-Doesn't this mean that <tt>rcu_dereference()</tt> is required?
-
-
-</p><p><b>Answer</b>:
-Presumably the <tt>-&gt;gp_lock</tt> acquired on line&nbsp;18 excludes
-any changes, including any insertions that <tt>rcu_dereference()</tt>
-would protect against.
-Therefore, any insertions will be delayed until after <tt>-&gt;gp_lock</tt>
-is released on line&nbsp;25, which in turn means that
-<tt>rcu_access_pointer()</tt> suffices.
-
-
-</p><p><a href="#Quick%20Quiz%2012"><b>Back to Quick Quiz 12</b>.</a>
-
-<a name="qq13answer"></a>
-<p><b>Quick Quiz 13</b>:
-Earlier it was claimed that <tt>call_rcu()</tt> and
-<tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
-by readers.
-But how can that be correct, given that the invocation of the callback
-and the freeing of the memory (respectively) must still wait for
-a grace period to elapse?
-
-
-</p><p><b>Answer</b>:
-We could define things this way, but keep in mind that this sort of
-definition would say that updates in garbage-collected languages
-cannot complete until the next time the garbage collector runs,
-which does not seem at all reasonable.
-The key point is that in most cases, an updater using either
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> can proceed to the
-next update as soon as it has invoked <tt>call_rcu()</tt> or
-<tt>kfree_rcu()</tt>, without having to wait for a subsequent
-grace period.
-
-
-</p><p><a href="#Quick%20Quiz%2013"><b>Back to Quick Quiz 13</b>.</a>
-
-<a name="qq14answer"></a>
-<p><b>Quick Quiz 14</b>:
-So what happens with <tt>synchronize_rcu()</tt> during
-scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
-kernels?
-
-
-</p><p><b>Answer</b>:
-In <tt>CONFIG_PREEMPT=n</tt> kernel, <tt>synchronize_rcu()</tt>
-maps directly to <tt>synchronize_sched()</tt>.
-Therefore, <tt>synchronize_rcu()</tt> works normally throughout
-boot in <tt>CONFIG_PREEMPT=n</tt> kernels.
-However, your code must also work in <tt>CONFIG_PREEMPT=y</tt> kernels,
-so it is still necessary to avoid invoking <tt>synchronize_rcu()</tt>
-during scheduler initialization.
-
-
-</p><p><a href="#Quick%20Quiz%2014"><b>Back to Quick Quiz 14</b>.</a>
-
-
 </body></html>
diff --git a/Documentation/RCU/Design/Requirements/Requirements.htmlx b/Documentation/RCU/Design/Requirements/Requirements.htmlx
deleted file mode 100644 (file)
index 3a97ba4..0000000
+++ /dev/null
@@ -1,2741 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
-        "http://www.w3.org/TR/html4/loose.dtd">
-        <html>
-        <head><title>A Tour Through RCU's Requirements [LWN.net]</title>
-        <meta HTTP-EQUIV="Content-Type" CONTENT="text/html; charset=utf-8">
-
-<h1>A Tour Through RCU's Requirements</h1>
-
-<p>Copyright IBM Corporation, 2015</p>
-<p>Author: Paul E.&nbsp;McKenney</p>
-<p><i>The initial version of this document appeared in the
-<a href="https://lwn.net/">LWN</a> articles
-<a href="https://lwn.net/Articles/652156/">here</a>,
-<a href="https://lwn.net/Articles/652677/">here</a>, and
-<a href="https://lwn.net/Articles/653326/">here</a>.</i></p>
-
-<h2>Introduction</h2>
-
-<p>
-Read-copy update (RCU) is a synchronization mechanism that is often
-used as a replacement for reader-writer locking.
-RCU is unusual in that updaters do not block readers,
-which means that RCU's read-side primitives can be exceedingly fast
-and scalable.
-In addition, updaters can make useful forward progress concurrently
-with readers.
-However, all this concurrency between RCU readers and updaters does raise
-the question of exactly what RCU readers are doing, which in turn
-raises the question of exactly what RCU's requirements are.
-
-<p>
-This document therefore summarizes RCU's requirements, and can be thought
-of as an informal, high-level specification for RCU.
-It is important to understand that RCU's specification is primarily
-empirical in nature;
-in fact, I learned about many of these requirements the hard way.
-This situation might cause some consternation, however, not only
-has this learning process been a lot of fun, but it has also been
-a great privilege to work with so many people willing to apply
-technologies in interesting new ways.
-
-<p>
-All that aside, here are the categories of currently known RCU requirements:
-</p>
-
-<ol>
-<li>   <a href="#Fundamental Requirements">
-       Fundamental Requirements</a>
-<li>   <a href="#Fundamental Non-Requirements">Fundamental Non-Requirements</a>
-<li>   <a href="#Parallelism Facts of Life">
-       Parallelism Facts of Life</a>
-<li>   <a href="#Quality-of-Implementation Requirements">
-       Quality-of-Implementation Requirements</a>
-<li>   <a href="#Linux Kernel Complications">
-       Linux Kernel Complications</a>
-<li>   <a href="#Software-Engineering Requirements">
-       Software-Engineering Requirements</a>
-<li>   <a href="#Other RCU Flavors">
-       Other RCU Flavors</a>
-<li>   <a href="#Possible Future Changes">
-       Possible Future Changes</a>
-</ol>
-
-<p>
-This is followed by a <a href="#Summary">summary</a>,
-which is in turn followed by the inevitable
-<a href="#Answers to Quick Quizzes">answers to the quick quizzes</a>.
-
-<h2><a name="Fundamental Requirements">Fundamental Requirements</a></h2>
-
-<p>
-RCU's fundamental requirements are the closest thing RCU has to hard
-mathematical requirements.
-These are:
-
-<ol>
-<li>   <a href="#Grace-Period Guarantee">
-       Grace-Period Guarantee</a>
-<li>   <a href="#Publish-Subscribe Guarantee">
-       Publish-Subscribe Guarantee</a>
-<li>   <a href="#Memory-Barrier Guarantees">
-       Memory-Barrier Guarantees</a>
-<li>   <a href="#RCU Primitives Guaranteed to Execute Unconditionally">
-       RCU Primitives Guaranteed to Execute Unconditionally</a>
-<li>   <a href="#Guaranteed Read-to-Write Upgrade">
-       Guaranteed Read-to-Write Upgrade</a>
-</ol>
-
-<h3><a name="Grace-Period Guarantee">Grace-Period Guarantee</a></h3>
-
-<p>
-RCU's grace-period guarantee is unusual in being premeditated:
-Jack Slingwine and I had this guarantee firmly in mind when we started
-work on RCU (then called &ldquo;rclock&rdquo;) in the early 1990s.
-That said, the past two decades of experience with RCU have produced
-a much more detailed understanding of this guarantee.
-
-<p>
-RCU's grace-period guarantee allows updaters to wait for the completion
-of all pre-existing RCU read-side critical sections.
-An RCU read-side critical section
-begins with the marker <tt>rcu_read_lock()</tt> and ends with
-the marker <tt>rcu_read_unlock()</tt>.
-These markers may be nested, and RCU treats a nested set as one
-big RCU read-side critical section.
-Production-quality implementations of <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> are extremely lightweight, and in
-fact have exactly zero overhead in Linux kernels built for production
-use with <tt>CONFIG_PREEMPT=n</tt>.
-
-<p>
-This guarantee allows ordering to be enforced with extremely low
-overhead to readers, for example:
-
-<blockquote>
-<pre>
- 1 int x, y;
- 2
- 3 void thread0(void)
- 4 {
- 5   rcu_read_lock();
- 6   r1 = READ_ONCE(x);
- 7   r2 = READ_ONCE(y);
- 8   rcu_read_unlock();
- 9 }
-10
-11 void thread1(void)
-12 {
-13   WRITE_ONCE(x, 1);
-14   synchronize_rcu();
-15   WRITE_ONCE(y, 1);
-16 }
-</pre>
-</blockquote>
-
-<p>
-Because the <tt>synchronize_rcu()</tt> on line&nbsp;14 waits for
-all pre-existing readers, any instance of <tt>thread0()</tt> that
-loads a value of zero from <tt>x</tt> must complete before
-<tt>thread1()</tt> stores to <tt>y</tt>, so that instance must
-also load a value of zero from <tt>y</tt>.
-Similarly, any instance of <tt>thread0()</tt> that loads a value of
-one from <tt>y</tt> must have started after the
-<tt>synchronize_rcu()</tt> started, and must therefore also load
-a value of one from <tt>x</tt>.
-Therefore, the outcome:
-<blockquote>
-<pre>
-(r1 == 0 &amp;&amp; r2 == 1)
-</pre>
-</blockquote>
-cannot happen.
-
-<p>@@QQ@@
-Wait a minute!
-You said that updaters can make useful forward progress concurrently
-with readers, but pre-existing readers will block
-<tt>synchronize_rcu()</tt>!!!
-Just who are you trying to fool???
-<p>@@QQA@@
-First, if updaters do not wish to be blocked by readers, they can use
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt>, which will
-be discussed later.
-Second, even when using <tt>synchronize_rcu()</tt>, the other
-update-side code does run concurrently with readers, whether pre-existing
-or not.
-<p>@@QQE@@
-
-<p>
-This scenario resembles one of the first uses of RCU in
-<a href="https://en.wikipedia.org/wiki/DYNIX">DYNIX/ptx</a>,
-which managed a distributed lock manager's transition into
-a state suitable for handling recovery from node failure,
-more or less as follows:
-
-<blockquote>
-<pre>
- 1 #define STATE_NORMAL        0
- 2 #define STATE_WANT_RECOVERY 1
- 3 #define STATE_RECOVERING    2
- 4 #define STATE_WANT_NORMAL   3
- 5
- 6 int state = STATE_NORMAL;
- 7
- 8 void do_something_dlm(void)
- 9 {
-10   int state_snap;
-11
-12   rcu_read_lock();
-13   state_snap = READ_ONCE(state);
-14   if (state_snap == STATE_NORMAL)
-15     do_something();
-16   else
-17     do_something_carefully();
-18   rcu_read_unlock();
-19 }
-20
-21 void start_recovery(void)
-22 {
-23   WRITE_ONCE(state, STATE_WANT_RECOVERY);
-24   synchronize_rcu();
-25   WRITE_ONCE(state, STATE_RECOVERING);
-26   recovery();
-27   WRITE_ONCE(state, STATE_WANT_NORMAL);
-28   synchronize_rcu();
-29   WRITE_ONCE(state, STATE_NORMAL);
-30 }
-</pre>
-</blockquote>
-
-<p>
-The RCU read-side critical section in <tt>do_something_dlm()</tt>
-works with the <tt>synchronize_rcu()</tt> in <tt>start_recovery()</tt>
-to guarantee that <tt>do_something()</tt> never runs concurrently
-with <tt>recovery()</tt>, but with little or no synchronization
-overhead in <tt>do_something_dlm()</tt>.
-
-<p>@@QQ@@
-Why is the <tt>synchronize_rcu()</tt> on line&nbsp;28 needed?
-<p>@@QQA@@
-Without that extra grace period, memory reordering could result in
-<tt>do_something_dlm()</tt> executing <tt>do_something()</tt>
-concurrently with the last bits of <tt>recovery()</tt>.
-<p>@@QQE@@
-
-<p>
-In order to avoid fatal problems such as deadlocks,
-an RCU read-side critical section must not contain calls to
-<tt>synchronize_rcu()</tt>.
-Similarly, an RCU read-side critical section must not
-contain anything that waits, directly or indirectly, on completion of
-an invocation of <tt>synchronize_rcu()</tt>.
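-
-<p>
-For example, a minimal sketch of the forbidden pattern follows, with
-<tt>do_a_little_reading()</tt> being a hypothetical stand-in.
-Depending on the RCU implementation, this either deadlocks (the grace
-period waits on the very reader that is waiting on it) or silently
-destroys the protection that the enclosing RCU read-side critical
-section was supposed to provide:
-
-<blockquote>
-<pre>
- 1 void buggy_reader(void)
- 2 {
- 3   rcu_read_lock();
- 4   do_a_little_reading();
- 5   synchronize_rcu(); /* BUG: grace-period wait inside a reader! */
- 6   rcu_read_unlock();
- 7 }
-</pre>
-</blockquote>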
-
-<p>
-Although RCU's grace-period guarantee is useful in and of itself, with
-<a href="https://lwn.net/Articles/573497/">quite a few use cases</a>,
-it would be good to be able to use RCU to coordinate read-side
-access to linked data structures.
-For this, the grace-period guarantee is not sufficient, as can
-be seen in function <tt>add_gp_buggy()</tt> below.
-We will look at the reader's code later, but in the meantime, just think of
-the reader as locklessly picking up the <tt>gp</tt> pointer,
-and, if the value loaded is non-<tt>NULL</tt>, locklessly accessing the
-<tt>-&gt;a</tt> and <tt>-&gt;b</tt> fields.
-
-<blockquote>
-<pre>
- 1 bool add_gp_buggy(int a, int b)
- 2 {
- 3   p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4   if (!p)
- 5     return -ENOMEM;
- 6   spin_lock(&amp;gp_lock);
- 7   if (rcu_access_pointer(gp)) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-11   p-&gt;a = a;
-12   p-&gt;b = b;
-13   gp = p; /* ORDERING BUG */
-14   spin_unlock(&amp;gp_lock);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-The problem is that both the compiler and weakly ordered CPUs are within
-their rights to reorder this code as follows:
-
-<blockquote>
-<pre>
- 1 bool add_gp_buggy_optimized(int a, int b)
- 2 {
- 3   p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4   if (!p)
- 5     return -ENOMEM;
- 6   spin_lock(&amp;gp_lock);
- 7   if (rcu_access_pointer(gp)) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-<b>11   gp = p; /* ORDERING BUG */
-12   p-&gt;a = a;
-13   p-&gt;b = b;</b>
-14   spin_unlock(&amp;gp_lock);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-If an RCU reader fetches <tt>gp</tt> just after
-<tt>add_gp_buggy_optimized</tt> executes line&nbsp;11,
-it will see garbage in the <tt>-&gt;a</tt> and <tt>-&gt;b</tt>
-fields.
-And this is but one of many ways in which compiler and hardware optimizations
-could cause trouble.
-Therefore, we clearly need some way to prevent the compiler and the CPU from
-reordering in this manner, which brings us to the publish-subscribe
-guarantee discussed in the next section.
-
-<h3><a name="Publish-Subscribe Guarantee">Publish/Subscribe Guarantee</a></h3>
-
-<p>
-RCU's publish-subscribe guarantee allows data to be inserted
-into a linked data structure without disrupting RCU readers.
-The updater uses <tt>rcu_assign_pointer()</tt> to insert the
-new data, and readers use <tt>rcu_dereference()</tt> to
-access data, whether new or old.
-The following shows an example of insertion:
-
-<blockquote>
-<pre>
- 1 bool add_gp(int a, int b)
- 2 {
- 3   p = kmalloc(sizeof(*p), GFP_KERNEL);
- 4   if (!p)
- 5     return -ENOMEM;
- 6   spin_lock(&amp;gp_lock);
- 7   if (rcu_access_pointer(gp)) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-11   p-&gt;a = a;
-12   p-&gt;b = b;
-13   rcu_assign_pointer(gp, p);
-14   spin_unlock(&amp;gp_lock);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-The <tt>rcu_assign_pointer()</tt> on line&nbsp;13 is conceptually
-equivalent to a simple assignment statement, but also guarantees
-that its assignment will
-happen after the two assignments in lines&nbsp;11 and&nbsp;12,
-similar to the C11 <tt>memory_order_release</tt> store operation.
-It also prevents any number of &ldquo;interesting&rdquo; compiler
-optimizations, for example, the use of <tt>gp</tt> as a scratch
-location immediately preceding the assignment.
-
-<p>@@QQ@@
-But <tt>rcu_assign_pointer()</tt> does nothing to prevent the
-two assignments to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt>
-from being reordered.
-Can't that also cause problems?
-<p>@@QQA@@
-No, it cannot.
-The readers cannot see either of these two fields until
-the assignment to <tt>gp</tt>, by which time both fields are
-fully initialized.
-So reordering the assignments
-to <tt>p-&gt;a</tt> and <tt>p-&gt;b</tt> cannot possibly
-cause any problems.
-<p>@@QQE@@
-
-<p>
-It is tempting to assume that the reader need not do anything special
-to control its accesses to the RCU-protected data,
-as shown in <tt>do_something_gp_buggy()</tt> below:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp_buggy(void)
- 2 {
- 3   rcu_read_lock();
- 4   p = gp;  /* OPTIMIZATIONS GALORE!!! */
- 5   if (p) {
- 6     do_something(p-&gt;a, p-&gt;b);
- 7     rcu_read_unlock();
- 8     return true;
- 9   }
-10   rcu_read_unlock();
-11   return false;
-12 }
-</pre>
-</blockquote>
-
-<p>
-However, this temptation must be resisted because there are a
-surprisingly large number of ways that the compiler
-(to say nothing of
-<a href="https://h71000.www7.hp.com/wizard/wiz_2637.html">DEC Alpha CPUs</a>)
-can trip this code up.
-For but one example, if the compiler were short of registers, it
-might choose to refetch from <tt>gp</tt> rather than keeping
-a separate copy in <tt>p</tt> as follows:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp_buggy_optimized(void)
- 2 {
- 3   rcu_read_lock();
- 4   if (gp) { /* OPTIMIZATIONS GALORE!!! */
-<b> 5     do_something(gp-&gt;a, gp-&gt;b);</b>
- 6     rcu_read_unlock();
- 7     return true;
- 8   }
- 9   rcu_read_unlock();
-10   return false;
-11 }
-</pre>
-</blockquote>
-
-<p>
-If this function ran concurrently with a series of updates that
-replaced the current structure with a new one,
-the fetches of <tt>gp-&gt;a</tt>
-and <tt>gp-&gt;b</tt> might well come from two different structures,
-which could cause serious confusion.
-To prevent this (and much else besides), <tt>do_something_gp()</tt> uses
-<tt>rcu_dereference()</tt> to fetch from <tt>gp</tt>:
-
-<blockquote>
-<pre>
- 1 bool do_something_gp(void)
- 2 {
- 3   rcu_read_lock();
- 4   p = rcu_dereference(gp);
- 5   if (p) {
- 6     do_something(p-&gt;a, p-&gt;b);
- 7     rcu_read_unlock();
- 8     return true;
- 9   }
-10   rcu_read_unlock();
-11   return false;
-12 }
-</pre>
-</blockquote>
-
-<p>
-The <tt>rcu_dereference()</tt> uses volatile casts and (for DEC Alpha)
-memory barriers in the Linux kernel.
-Should a
-<a href="http://www.rdrop.com/users/paulmck/RCU/consume.2015.07.13a.pdf">high-quality implementation of C11 <tt>memory_order_consume</tt> [PDF]</a>
-ever appear, then <tt>rcu_dereference()</tt> could be implemented
-as a <tt>memory_order_consume</tt> load.
-Regardless of the exact implementation, a pointer fetched by
-<tt>rcu_dereference()</tt> may not be used outside of the
-outermost RCU read-side critical section containing that
-<tt>rcu_dereference()</tt>, unless protection of
-the corresponding data element has been passed from RCU to some
-other synchronization mechanism, most commonly locking or
-<a href="https://www.kernel.org/doc/Documentation/RCU/rcuref.txt">reference counting</a>.
-
-<p>
-In short, updaters use <tt>rcu_assign_pointer()</tt> and readers
-use <tt>rcu_dereference()</tt>, and these two RCU API elements
-work together to ensure that readers have a consistent view of
-newly added data elements.
-
-<p>
-Of course, it is also necessary to remove elements from RCU-protected
-data structures, for example, using the following process:
-
-<ol>
-<li>   Remove the data element from the enclosing structure.
-<li>   Wait for all pre-existing RCU read-side critical sections
-       to complete (because only pre-existing readers can possibly have
-       a reference to the newly removed data element).
-<li>   At this point, only the updater has a reference to the
-       newly removed data element, so it can safely reclaim
-       the data element, for example, by passing it to <tt>kfree()</tt>.
-</ol>
-
-This process is implemented by <tt>remove_gp_synchronous()</tt>:
-
-<blockquote>
-<pre>
- 1 bool remove_gp_synchronous(void)
- 2 {
- 3   struct foo *p;
- 4
- 5   spin_lock(&amp;gp_lock);
- 6   p = rcu_access_pointer(gp);
- 7   if (!p) {
- 8     spin_unlock(&amp;gp_lock);
- 9     return false;
-10   }
-11   rcu_assign_pointer(gp, NULL);
-12   spin_unlock(&amp;gp_lock);
-13   synchronize_rcu();
-14   kfree(p);
-15   return true;
-16 }
-</pre>
-</blockquote>
-
-<p>
-This function is straightforward, with line&nbsp;13 waiting for a grace
-period before line&nbsp;14 frees the old data element.
-This waiting ensures that readers will reach line&nbsp;7 of
-<tt>do_something_gp()</tt> before the data element referenced by
-<tt>p</tt> is freed.
-The <tt>rcu_access_pointer()</tt> on line&nbsp;6 is similar to
-<tt>rcu_dereference()</tt>, except that:
-
-<ol>
-<li>   The value returned by <tt>rcu_access_pointer()</tt>
-       cannot be dereferenced.
-       If you want to access the value pointed to as well as
-       the pointer itself, use <tt>rcu_dereference()</tt>
-       instead of <tt>rcu_access_pointer()</tt>.
-<li>   The call to <tt>rcu_access_pointer()</tt> need not be
-       protected.
-       In contrast, <tt>rcu_dereference()</tt> must either be
-       within an RCU read-side critical section or in a code
-       segment where the pointer cannot change, for example, in
-       code protected by the corresponding update-side lock.
-</ol>
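-
-<p>
-As a rough sketch of the distinction, a heuristic check of the pointer
-itself can use <tt>rcu_access_pointer()</tt> with no protection at all,
-whereas any dereferencing must use <tt>rcu_dereference()</tt> under
-suitable protection.
-The sketch below reuses <tt>gp</tt> and <tt>do_something()</tt> from
-the earlier examples; <tt>count_gp_sightings()</tt> is a hypothetical
-stand-in:
-
-<blockquote>
-<pre>
- 1 /* Pointer-only check:  No protection required. */
- 2 if (rcu_access_pointer(gp))
- 3   count_gp_sightings();
- 4
- 5 /* Dereference:  Must be in an RCU read-side critical section. */
- 6 rcu_read_lock();
- 7 p = rcu_dereference(gp);
- 8 if (p)
- 9   do_something(p-&gt;a, p-&gt;b);
-10 rcu_read_unlock();
-</pre>
-</blockquote>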
-
-<p>@@QQ@@
-Without the <tt>rcu_dereference()</tt> or the
-<tt>rcu_access_pointer()</tt>, what destructive optimizations
-might the compiler make use of?
-<p>@@QQA@@
-Let's start with what happens to <tt>do_something_gp()</tt>
-if it fails to use <tt>rcu_dereference()</tt>.
-It could reuse a value formerly fetched from this same pointer.
-It could also fetch the pointer from <tt>gp</tt> in a byte-at-a-time
-manner, resulting in <i>load tearing</i>, in turn resulting in a
-bytewise mash-up of two distinct pointer values.
-It might even use value-speculation optimizations, where it makes a wrong
-guess, but by the time it gets around to checking the value, an update
-has changed the pointer to match the wrong guess.
-Too bad about any dereferences that returned pre-initialization garbage
-in the meantime!
-
-<p>
-For <tt>remove_gp_synchronous()</tt>, as long as all modifications
-to <tt>gp</tt> are carried out while holding <tt>gp_lock</tt>,
-the above optimizations are harmless.
-However,
-with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt>,
-<tt>sparse</tt> will complain if you
-define <tt>gp</tt> with <tt>__rcu</tt> and then
-access it without using
-either <tt>rcu_access_pointer()</tt> or <tt>rcu_dereference()</tt>.
-<p>@@QQE@@
-
-<p>
-In short, RCU's publish-subscribe guarantee is provided by the combination
-of <tt>rcu_assign_pointer()</tt> and <tt>rcu_dereference()</tt>.
-This guarantee allows data elements to be safely added to RCU-protected
-linked data structures without disrupting RCU readers.
-This guarantee can be used in combination with the grace-period
-guarantee to also allow data elements to be removed from RCU-protected
-linked data structures, again without disrupting RCU readers.
-
-<p>
-This guarantee was only partially premeditated.
-DYNIX/ptx used an explicit memory barrier for publication, but had nothing
-resembling <tt>rcu_dereference()</tt> for subscription, nor did it
-have anything resembling the <tt>smp_read_barrier_depends()</tt>
-that was later subsumed into <tt>rcu_dereference()</tt>.
-The need for these operations made itself known quite suddenly at a
-late-1990s meeting with the DEC Alpha architects, back in the days when
-DEC was still a free-standing company.
-It took the Alpha architects a good hour to convince me that any sort
-of barrier would ever be needed, and it then took me a good <i>two</i> hours
-to convince them that their documentation did not make this point clear.
-More recent work with the C and C++ standards committees has provided
-much education on tricks and traps from the compiler.
-In short, compilers were much less tricky in the early 1990s, but in
-2015, don't even think about omitting <tt>rcu_dereference()</tt>!
-
-<h3><a name="Memory-Barrier Guarantees">Memory-Barrier Guarantees</a></h3>
-
-<p>
-The previous section's simple linked-data-structure scenario clearly
-demonstrates the need for RCU's stringent memory-ordering guarantees on
-systems with more than one CPU:
-
-<ol>
-<li>   Each CPU that has an RCU read-side critical section that
-       begins before <tt>synchronize_rcu()</tt> starts is
-       guaranteed to execute a full memory barrier between the time
-       that the RCU read-side critical section ends and the time that
-       <tt>synchronize_rcu()</tt> returns.
-       Without this guarantee, a pre-existing RCU read-side critical section
-       might hold a reference to the newly removed <tt>struct foo</tt>
-       after the <tt>kfree()</tt> on line&nbsp;14 of
-       <tt>remove_gp_synchronous()</tt>.
-<li>   Each CPU that has an RCU read-side critical section that ends
-       after <tt>synchronize_rcu()</tt> returns is guaranteed
-       to execute a full memory barrier between the time that
-       <tt>synchronize_rcu()</tt> begins and the time that the RCU
-       read-side critical section begins.
-       Without this guarantee, a later RCU read-side critical section
-       running after the <tt>kfree()</tt> on line&nbsp;14 of
-       <tt>remove_gp_synchronous()</tt> might
-       later run <tt>do_something_gp()</tt> and find the
-       newly deleted <tt>struct foo</tt>.
-<li>   If the task invoking <tt>synchronize_rcu()</tt> remains
-       on a given CPU, then that CPU is guaranteed to execute a full
-       memory barrier sometime during the execution of
-       <tt>synchronize_rcu()</tt>.
-       This guarantee ensures that the <tt>kfree()</tt> on
-       line&nbsp;14 of <tt>remove_gp_synchronous()</tt> really does
-       execute after the removal on line&nbsp;11.
-<li>   If the task invoking <tt>synchronize_rcu()</tt> migrates
-       among a group of CPUs during that invocation, then each of the
-       CPUs in that group is guaranteed to execute a full memory barrier
-       sometime during the execution of <tt>synchronize_rcu()</tt>.
-       This guarantee also ensures that the <tt>kfree()</tt> on
-       line&nbsp;14 of <tt>remove_gp_synchronous()</tt> really does
-       execute after the removal on
-       line&nbsp;11, but also in the case where the thread executing the
-       <tt>synchronize_rcu()</tt> migrates in the meantime.
-</ol>
-
-<p>@@QQ@@
-Given that multiple CPUs can start RCU read-side critical sections
-at any time without any ordering whatsoever, how can RCU possibly tell whether
-or not a given RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>?
-<p>@@QQA@@
-If RCU cannot tell whether or not a given
-RCU read-side critical section starts before a
-given instance of <tt>synchronize_rcu()</tt>,
-then it must assume that the RCU read-side critical section
-started first.
-In other words, a given instance of <tt>synchronize_rcu()</tt>
-can avoid waiting on a given RCU read-side critical section only
-if it can prove that <tt>synchronize_rcu()</tt> started first.
-<p>@@QQE@@
-
-<p>@@QQ@@
-The first and second guarantees require unbelievably strict ordering!
-Are all these memory barriers <i>really</i> required?
-<p>@@QQA@@
-Yes, they really are required.
-To see why the first guarantee is required, consider the following
-sequence of events:
-
-<ol>
-<li>   CPU 1: <tt>rcu_read_lock()</tt>
-<li>   CPU 1: <tt>q = rcu_dereference(gp);
-       /* Very likely to return p. */</tt>
-<li>   CPU 0: <tt>list_del_rcu(p);</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>   CPU 1: <tt>do_something_with(q-&gt;a);
-       /* No smp_mb(), so might happen after kfree(). */</tt>
-<li>   CPU 1: <tt>rcu_read_unlock()</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>   CPU 0: <tt>kfree(p);</tt>
-</ol>
-
-<p>
-Therefore, there absolutely must be a full memory barrier between the
-end of the RCU read-side critical section and the end of the
-grace period.
-
-<p>
-The sequence of events demonstrating the necessity of the second rule
-is roughly similar:
-
-<ol>
-<li>   CPU 0: <tt>list_del_rcu(p);</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> starts.
-<li>   CPU 1: <tt>rcu_read_lock()</tt>
-<li>   CPU 1: <tt>q = rcu_dereference(gp);
-       /* Might return p if no memory barrier. */</tt>
-<li>   CPU 0: <tt>synchronize_rcu()</tt> returns.
-<li>   CPU 0: <tt>kfree(p);</tt>
-<li>   CPU 1: <tt>do_something_with(q-&gt;a); /* Boom!!! */</tt>
-<li>   CPU 1: <tt>rcu_read_unlock()</tt>
-</ol>
-
-<p>
-And similarly, without a memory barrier between the beginning of the
-grace period and the beginning of the RCU read-side critical section,
-CPU&nbsp;1 might end up accessing the freelist.
-
-<p>
-The &ldquo;as if&rdquo; rule of course applies, so that any implementation
-that acts as if the appropriate memory barriers were in place is a
-correct implementation.
-That said, it is much easier to fool yourself into believing that you have
-adhered to the as-if rule than it is to actually adhere to it!
-<p>@@QQE@@
-
-<p>
-Note that these memory-barrier requirements do not replace the fundamental
-RCU requirement that a grace period wait for all pre-existing readers.
-On the contrary, the memory barriers called out in this section must operate in
-such a way as to <i>enforce</i> this fundamental requirement.
-Of course, different implementations enforce this requirement in different
-ways, but enforce it they must.
-
-<h3><a name="RCU Primitives Guaranteed to Execute Unconditionally">RCU Primitives Guaranteed to Execute Unconditionally</a></h3>
-
-<p>
-The common-case RCU primitives are unconditional.
-They are invoked, they do their job, and they return, with no possibility
-of error, and no need to retry.
-This is a key RCU design philosophy.
-
-<p>
-However, this philosophy is pragmatic rather than pigheaded.
-If someone comes up with a good justification for a particular conditional
-RCU primitive, it might well be implemented and added.
-After all, this guarantee was reverse-engineered, not premeditated.
-The unconditional nature of the RCU primitives was initially an
-accident of implementation, and later experience with synchronization
-primitives with conditional primitives caused me to elevate this
-accident to a guarantee.
-Therefore, the justification for adding a conditional primitive to
-RCU would need to be based on detailed and compelling use cases.
-
-<h3><a name="Guaranteed Read-to-Write Upgrade">Guaranteed Read-to-Write Upgrade</a></h3>
-
-<p>
-As far as RCU is concerned, it is always possible to carry out an
-update within an RCU read-side critical section.
-For example, that RCU read-side critical section might search for
-a given data element, and then might acquire the update-side
-spinlock in order to update that element, all while remaining
-in that RCU read-side critical section.
-Of course, it is necessary to exit the RCU read-side critical section
-before invoking <tt>synchronize_rcu()</tt>; however, this
-inconvenience can be avoided through use of the
-<tt>call_rcu()</tt> and <tt>kfree_rcu()</tt> API members
-described later in this document.
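-
-<p>
-A minimal sketch of such a read-to-write upgrade, reusing <tt>gp</tt>
-and <tt>gp_lock</tt> from the earlier examples and using a hypothetical
-<tt>update_element()</tt> as a stand-in for the actual update, might
-look as follows:
-
-<blockquote>
-<pre>
- 1 bool upgrade_and_update(void)
- 2 {
- 3   struct foo *p;
- 4   bool ret = false;
- 5
- 6   rcu_read_lock();
- 7   p = rcu_dereference(gp);  /* Read-side search. */
- 8   if (p) {
- 9     spin_lock(&amp;gp_lock);  /* Upgrade to update side. */
-10     if (p == rcu_access_pointer(gp)) {  /* Still current? */
-11       update_element(p);
-12       ret = true;
-13     }
-14     spin_unlock(&amp;gp_lock);
-15   }
-16   rcu_read_unlock();
-17   return ret;
-18 }
-</pre>
-</blockquote>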
-
-<p>@@QQ@@
-But how does the upgrade-to-write operation exclude other readers?
-<p>@@QQA@@
-It doesn't, just like normal RCU updates, which also do not exclude
-RCU readers.
-<p>@@QQE@@
-
-<p>
-This guarantee allows lookup code to be shared between read-side
-and update-side code, and was premeditated, appearing in the earliest
-DYNIX/ptx RCU documentation.
-
-<h2><a name="Fundamental Non-Requirements">Fundamental Non-Requirements</a></h2>
-
-<p>
-RCU provides extremely lightweight readers, and its read-side guarantees,
-though quite useful, are correspondingly lightweight.
-It is therefore all too easy to assume that RCU is guaranteeing more
-than it really is.
-Of course, the list of things that RCU does not guarantee is infinitely
-long; however, the following sections list a few non-guarantees that
-have caused confusion.
-Except where otherwise noted, these non-guarantees were premeditated.
-
-<ol>
-<li>   <a href="#Readers Impose Minimal Ordering">
-       Readers Impose Minimal Ordering</a>
-<li>   <a href="#Readers Do Not Exclude Updaters">
-       Readers Do Not Exclude Updaters</a>
-<li>   <a href="#Updaters Only Wait For Old Readers">
-       Updaters Only Wait For Old Readers</a>
-<li>   <a href="#Grace Periods Don't Partition Read-Side Critical Sections">
-       Grace Periods Don't Partition Read-Side Critical Sections</a>
-<li>   <a href="#Read-Side Critical Sections Don't Partition Grace Periods">
-       Read-Side Critical Sections Don't Partition Grace Periods</a>
-<li>   <a href="#Disabling Preemption Does Not Block Grace Periods">
-       Disabling Preemption Does Not Block Grace Periods</a>
-</ol>
-
-<h3><a name="Readers Impose Minimal Ordering">Readers Impose Minimal Ordering</a></h3>
-
-<p>
-Reader-side markers such as <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> provide absolutely no ordering guarantees
-except through their interaction with the grace-period APIs such as
-<tt>synchronize_rcu()</tt>.
-To see this, consider the following pair of threads:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(x, 1);
- 5   rcu_read_unlock();
- 6   rcu_read_lock();
- 7   WRITE_ONCE(y, 1);
- 8   rcu_read_unlock();
- 9 }
-10
-11 void thread1(void)
-12 {
-13   rcu_read_lock();
-14   r1 = READ_ONCE(y);
-15   rcu_read_unlock();
-16   rcu_read_lock();
-17   r2 = READ_ONCE(x);
-18   rcu_read_unlock();
-19 }
-</pre>
-</blockquote>
-
-<p>
-After <tt>thread0()</tt> and <tt>thread1()</tt> execute
-concurrently, it is quite possible to have
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 0)
-</pre>
-</blockquote>
-
-(that is, <tt>y</tt> appears to have been assigned before <tt>x</tt>),
-which would not be possible if <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> had much in the way of ordering
-properties.
-But they do not, so the CPU is within its rights
-to do significant reordering.
-This is by design:  Any significant ordering constraints would slow down
-these fast-path APIs.
-
-<p>@@QQ@@
-Can't the compiler also reorder this code?
-<p>@@QQA@@
-No, the volatile casts in <tt>READ_ONCE()</tt> and
-<tt>WRITE_ONCE()</tt> prevent the compiler from reordering in
-this particular case.
-<p>@@QQE@@
-
-<h3><a name="Readers Do Not Exclude Updaters">Readers Do Not Exclude Updaters</a></h3>
-
-<p>
-Neither <tt>rcu_read_lock()</tt> nor <tt>rcu_read_unlock()</tt>
-exclude updates.
-All they do is to prevent grace periods from ending.
-The following example illustrates this:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   r1 = READ_ONCE(y);
- 5   if (r1) {
- 6     do_something_with_nonzero_x();
- 7     r2 = READ_ONCE(x);
- 8     WARN_ON(!r2); /* BUG!!! */
- 9   }
-10   rcu_read_unlock();
-11 }
-12
-13 void thread1(void)
-14 {
-15   spin_lock(&amp;my_lock);
-16   WRITE_ONCE(x, 1);
-17   WRITE_ONCE(y, 1);
-18   spin_unlock(&amp;my_lock);
-19 }
-</pre>
-</blockquote>
-
-<p>
-If the <tt>thread0()</tt> function's <tt>rcu_read_lock()</tt>
-excluded the <tt>thread1()</tt> function's update,
-the <tt>WARN_ON()</tt> could never fire.
-But the fact is that <tt>rcu_read_lock()</tt> does not exclude
-much of anything aside from subsequent grace periods, of which
-<tt>thread1()</tt> has none, so the
-<tt>WARN_ON()</tt> can and does fire.
-
-<h3><a name="Updaters Only Wait For Old Readers">Updaters Only Wait For Old Readers</a></h3>
-
-<p>
-It might be tempting to assume that after <tt>synchronize_rcu()</tt>
-completes, there are no readers executing.
-This temptation must be avoided because
-new readers can start immediately after <tt>synchronize_rcu()</tt>
-starts, and <tt>synchronize_rcu()</tt> is under no
-obligation to wait for these new readers.
-
-<p>@@QQ@@
-Suppose that synchronize_rcu() did wait until all readers had completed.
-Would the updater be able to rely on this?
-<p>@@QQA@@
-No.
-Even if <tt>synchronize_rcu()</tt> were to wait until
-all readers had completed, a new reader might start immediately after
-<tt>synchronize_rcu()</tt> completed.
-Therefore, the code following
-<tt>synchronize_rcu()</tt> cannot rely on there being no readers
-in any case.
-<p>@@QQE@@
-
-<h3><a name="Grace Periods Don't Partition Read-Side Critical Sections">
-Grace Periods Don't Partition Read-Side Critical Sections</a></h3>
-
-<p>
-It is tempting to assume that if any part of one RCU read-side critical
-section precedes a given grace period, and if any part of another RCU
-read-side critical section follows that same grace period, then all of
-the first RCU read-side critical section must precede all of the second.
-However, this just isn't the case: A single grace period does not
-partition the set of RCU read-side critical sections.
-An example of this situation can be illustrated as follows, where
-<tt>a</tt>, <tt>b</tt>, and <tt>c</tt> are initially all zero:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(a, 1);
- 5   WRITE_ONCE(b, 1);
- 6   rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11   r1 = READ_ONCE(a);
-12   synchronize_rcu();
-13   WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18   rcu_read_lock();
-19   r2 = READ_ONCE(b);
-20   r3 = READ_ONCE(c);
-21   rcu_read_unlock();
-22 }
-</pre>
-</blockquote>
-
-<p>
-It turns out that the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 0 &amp;&amp; r3 == 1)
-</pre>
-</blockquote>
-
-is entirely possible.
-The following figure shows how this can happen, with each circled
-<tt>QS</tt> indicating the point at which RCU recorded a
-<i>quiescent state</i> for each thread, that is, a state in which
-RCU knows that the thread cannot be in the midst of an RCU read-side
-critical section that started before the current grace period:
-
-<p><img src="GPpartitionReaders1.svg" alt="GPpartitionReaders1.svg" width="60%"></p>
-
-<p>
-If it is necessary to partition RCU read-side critical sections in this
-manner, it is necessary to use two grace periods, where the first
-grace period is known to end before the second grace period starts:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(a, 1);
- 5   WRITE_ONCE(b, 1);
- 6   rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11   r1 = READ_ONCE(a);
-12   synchronize_rcu();
-13   WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18   r2 = READ_ONCE(c);
-19   synchronize_rcu();
-20   WRITE_ONCE(d, 1);
-21 }
-22
-23 void thread3(void)
-24 {
-25   rcu_read_lock();
-26   r3 = READ_ONCE(b);
-27   r4 = READ_ONCE(d);
-28   rcu_read_unlock();
-29 }
-</pre>
-</blockquote>
-
-<p>
-Here, if <tt>(r1 == 1)</tt>, then
-<tt>thread0()</tt>'s write to <tt>b</tt> must happen
-before the end of <tt>thread1()</tt>'s grace period.
-If in addition <tt>(r4 == 1)</tt>, then
-<tt>thread3()</tt>'s read from <tt>b</tt> must happen
-after the beginning of <tt>thread2()</tt>'s grace period.
-If it is also the case that <tt>(r2 == 1)</tt>, then the
-end of <tt>thread1()</tt>'s grace period must precede the
-beginning of <tt>thread2()</tt>'s grace period.
-This means that the two RCU read-side critical sections cannot overlap,
-guaranteeing that <tt>(r3 == 1)</tt>.
-As a result, the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 1 &amp;&amp; r3 == 0 &amp;&amp; r4 == 1)
-</pre>
-</blockquote>
-
-cannot happen.
-
-<p>
-This non-requirement was also non-premeditated, but became apparent
-when studying RCU's interaction with memory ordering.
-
-<h3><a name="Read-Side Critical Sections Don't Partition Grace Periods">
-Read-Side Critical Sections Don't Partition Grace Periods</a></h3>
-
-<p>
-It is also tempting to assume that if an RCU read-side critical section
-happens between a pair of grace periods, then those grace periods cannot
-overlap.
-However, this temptation leads nowhere good, as can be illustrated by
-the following, with all variables initially zero:
-
-<blockquote>
-<pre>
- 1 void thread0(void)
- 2 {
- 3   rcu_read_lock();
- 4   WRITE_ONCE(a, 1);
- 5   WRITE_ONCE(b, 1);
- 6   rcu_read_unlock();
- 7 }
- 8
- 9 void thread1(void)
-10 {
-11   r1 = READ_ONCE(a);
-12   synchronize_rcu();
-13   WRITE_ONCE(c, 1);
-14 }
-15
-16 void thread2(void)
-17 {
-18   rcu_read_lock();
-19   WRITE_ONCE(d, 1);
-20   r2 = READ_ONCE(c);
-21   rcu_read_unlock();
-22 }
-23
-24 void thread3(void)
-25 {
-26   r3 = READ_ONCE(d);
-27   synchronize_rcu();
-28   WRITE_ONCE(e, 1);
-29 }
-30
-31 void thread4(void)
-32 {
-33   rcu_read_lock();
-34   r4 = READ_ONCE(b);
-35   r5 = READ_ONCE(e);
-36   rcu_read_unlock();
-37 }
-</pre>
-</blockquote>
-
-<p>
-In this case, the outcome:
-
-<blockquote>
-<pre>
-(r1 == 1 &amp;&amp; r2 == 1 &amp;&amp; r3 == 1 &amp;&amp; r4 == 0 &amp;&amp; r5 == 1)
-</pre>
-</blockquote>
-
-is entirely possible, as illustrated below:
-
-<p><img src="ReadersPartitionGP1.svg" alt="ReadersPartitionGP1.svg" width="100%"></p>
-
-<p>
-Again, an RCU read-side critical section can overlap almost all of a
-given grace period, just so long as it does not overlap the entire
-grace period.
-As a result, an RCU read-side critical section cannot partition a pair
-of RCU grace periods.
-
-<p>@@QQ@@
-How long a sequence of grace periods, each separated by an RCU read-side
-critical section, would be required to partition the RCU read-side
-critical sections at the beginning and end of the chain?
-<p>@@QQA@@
-In theory, an infinite number.
-In practice, an unknown number that is sensitive to both implementation
-details and timing considerations.
-Therefore, even in practice, RCU users must abide by the theoretical rather
-than the practical answer.
-<p>@@QQE@@
-
-<h3><a name="Disabling Preemption Does Not Block Grace Periods">
-Disabling Preemption Does Not Block Grace Periods</a></h3>
-
-<p>
-There was a time when disabling preemption on any given CPU would block
-subsequent grace periods.
-However, this was an accident of implementation and is not a requirement.
-And in the current Linux-kernel implementation, disabling preemption
-on a given CPU in fact does not block grace periods, as Oleg Nesterov
-<a href="https://lkml.kernel.org/g/20150614193825.GA19582@redhat.com">demonstrated</a>.
-
-<p>
-If you need a preempt-disable region to block grace periods, you need to add
-<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>, for example
-as follows:
-
-<blockquote>
-<pre>
- 1 preempt_disable();
- 2 rcu_read_lock();
- 3 do_something();
- 4 rcu_read_unlock();
- 5 preempt_enable();
- 6
- 7 /* Spinlocks implicitly disable preemption. */
- 8 spin_lock(&amp;mylock);
- 9 rcu_read_lock();
-10 do_something();
-11 rcu_read_unlock();
-12 spin_unlock(&amp;mylock);
-</pre>
-</blockquote>
-
-<p>
-In theory, you could enter the RCU read-side critical section first,
-but it is more efficient to keep the entire RCU read-side critical
-section contained in the preempt-disable region as shown above.
-Of course, RCU read-side critical sections that extend outside of
-preempt-disable regions will work correctly, but such critical sections
-can be preempted, which forces <tt>rcu_read_unlock()</tt> to do
-more work.
-And no, this is <i>not</i> an invitation to enclose all of your RCU
-read-side critical sections within preempt-disable regions, because
-doing so would degrade real-time response.
-
-<p>
-This non-requirement appeared with preemptible RCU.
-If you need a grace period that waits on non-preemptible code regions, use
-<a href="#Sched Flavor">RCU-sched</a>.
-
-<h2><a name="Parallelism Facts of Life">Parallelism Facts of Life</a></h2>
-
-<p>
-These parallelism facts of life are by no means specific to RCU, but
-the RCU implementation must abide by them.
-They therefore bear repeating:
-
-<ol>
-<li>   Any CPU or task may be delayed at any time,
-       and any attempts to avoid these delays by disabling
-       preemption, interrupts, or whatever are completely futile.
-       This is most obvious in preemptible user-level
-       environments and in virtualized environments (where
-       a given guest OS's VCPUs can be preempted at any time by
-       the underlying hypervisor), but can also happen in bare-metal
-       environments due to ECC errors, NMIs, and other hardware
-       events.
-       Although a delay of more than about 20 seconds can result
-       in splats, the RCU implementation is obligated to use
-       algorithms that can tolerate extremely long delays, but where
-       &ldquo;extremely long&rdquo; is not long enough to allow
-       wrap-around when incrementing a 64-bit counter.
-<li>   Both the compiler and the CPU can reorder memory accesses.
-       Where it matters, RCU must use compiler directives and
-       memory-barrier instructions to preserve ordering.
-<li>   Conflicting writes to memory locations in any given cache line
-       will result in expensive cache misses.
-       Greater numbers of concurrent writes and more-frequent
-       concurrent writes will result in more dramatic slowdowns.
-       RCU is therefore obligated to use algorithms that have
-       sufficient locality to avoid significant performance and
-       scalability problems.
-<li>   As a rough rule of thumb, only one CPU's worth of processing
-       may be carried out under the protection of any given exclusive
-       lock.
-       RCU must therefore use scalable locking designs.
-<li>   Counters are finite, especially on 32-bit systems.
-       RCU's use of counters must therefore tolerate counter wrap,
-       or be designed such that counter wrap would take way more
-       time than a single system is likely to run.
-       An uptime of ten years is quite possible, a runtime
-       of a century much less so.
-       As an example of the latter, RCU's dyntick-idle nesting counter
-       allows 54 bits for interrupt nesting level (this counter
-       is 64 bits even on a 32-bit system).
-       Overflowing this counter requires 2<sup>54</sup>
-       half-interrupts on a given CPU without that CPU ever going idle.
-       If a half-interrupt happened every microsecond, it would take
-       about 570 years of runtime to overflow this counter (see the
-       arithmetic sketch following this list), which is currently
-       believed to be an acceptably long time.
-<li>   Linux systems can have thousands of CPUs running a single
-       Linux kernel in a single shared-memory environment.
-       RCU must therefore pay close attention to high-end scalability.
-</ol>
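-
-<p>
-As a quick check of the arithmetic behind the 570-year figure in the
-counter-wrap item above:
-
-<blockquote>
-<pre>
-2^54 half-interrupts           = 18,014,398,509,481,984
-at one per microsecond        ~= 1.8 x 10^10 seconds
-one year                      ~= 3.16 x 10^7 seconds
-1.8 x 10^10 / 3.16 x 10^7     ~= 570 years
-</pre>
-</blockquote>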
-
-<p>
-This last parallelism fact of life means that RCU must pay special
-attention to the preceding facts of life.
-The idea that Linux might scale to systems with thousands of CPUs would
-have been met with some skepticism in the 1990s, but these requirements
-would otherwise have been unsurprising, even in the early 1990s.
-
-<h2><a name="Quality-of-Implementation Requirements">Quality-of-Implementation Requirements</a></h2>
-
-<p>
-These sections list quality-of-implementation requirements.
-Although an RCU implementation that ignores these requirements could
-still be used, it would likely be subject to limitations that would
-make it inappropriate for industrial-strength production use.
-Classes of quality-of-implementation requirements are as follows:
-
-<ol>
-<li>   <a href="#Specialization">Specialization</a>
-<li>   <a href="#Performance and Scalability">Performance and Scalability</a>
-<li>   <a href="#Composability">Composability</a>
-<li>   <a href="#Corner Cases">Corner Cases</a>
-</ol>
-
-<p>
-These classes are covered in the following sections.
-
-<h3><a name="Specialization">Specialization</a></h3>
-
-<p>
-RCU is and always has been intended primarily for read-mostly situations, as
-illustrated by the following figure.
-This means that RCU's read-side primitives are optimized, often at the
-expense of its update-side primitives.
-
-<p><img src="RCUApplicability.svg" alt="RCUApplicability.svg" width="70%"></p>
-
-<p>
-This focus on read-mostly situations means that RCU must interoperate
-with other synchronization primitives.
-For example, the <tt>add_gp()</tt> and <tt>remove_gp_synchronous()</tt>
-examples discussed earlier use RCU to protect readers and locking to
-coordinate updaters.
-However, the need extends much farther, requiring that a variety of
-synchronization primitives be legal within RCU read-side critical sections,
-including spinlocks, sequence locks, atomic operations, reference
-counters, and memory barriers.
-
-<p>@@QQ@@
-What about sleeping locks?
-<p>@@QQA@@
-These are forbidden within Linux-kernel RCU read-side critical sections
-because it is not legal to place a quiescent state (in this case,
-voluntary context switch) within an RCU read-side critical section.
-However, sleeping locks may be used within userspace RCU read-side critical
-sections, and also within Linux-kernel sleepable RCU
-<a href="#Sleepable RCU">(SRCU)</a>
-read-side critical sections.
-In addition, the -rt patchset turns spinlocks into sleeping locks so
-that the corresponding critical sections can be preempted, which
-also means that these sleeplockified spinlocks (but not other sleeping locks!)
-may be acquired within -rt-Linux-kernel RCU read-side critical sections.
-
-<p>
-Note that it <i>is</i> legal for a normal RCU read-side critical section
-to conditionally acquire a sleeping lock (as in <tt>mutex_trylock()</tt>),
-but only as long as it does not loop indefinitely attempting to
-conditionally acquire that sleeping lock.
-The key point is that things like <tt>mutex_trylock()</tt>
-either return with the mutex held, or return an error indication if
-the mutex was not immediately available.
-Either way, <tt>mutex_trylock()</tt> returns immediately without sleeping.
-<p>@@QQE@@
-
-<p>
-It often comes as a surprise that many algorithms do not require a
-consistent view of data, and can therefore tolerate the inconsistent
-views that RCU readers sometimes see,
-with network routing being the poster child.
-Internet routing algorithms take significant time to propagate
-updates, so that by the time an update arrives at a given system,
-that system has been sending network traffic the wrong way for
-a considerable length of time.
-Having a few threads continue to send traffic the wrong way for a
-few more milliseconds is clearly not a problem:  In the worst case,
-TCP retransmissions will eventually get the data where it needs to go.
-In general, when tracking the state of the universe outside of the
-computer, some level of inconsistency must be tolerated due to
-speed-of-light delays if nothing else.
-
-<p>
-Furthermore, uncertainty about external state is inherent in many cases.
-For example, a pair of veterinarians might use heartbeat to determine
-whether or not a given cat was alive.
-But how long should they wait after the last heartbeat to decide that
-the cat is in fact dead?
-Waiting less than 400 milliseconds makes no sense because this would
-mean that a relaxed cat would be considered to cycle between death
-and life more than 100 times per minute.
-Moreover, just as with human beings, a cat's heart might stop for
-some period of time, so the exact wait period is a judgment call.
-One of our pair of veterinarians might wait 30 seconds before pronouncing
-the cat dead, while the other might insist on waiting a full minute.
-The two veterinarians would then disagree on the state of the cat during
-the final 30 seconds of the minute following the last heartbeat, as
-fancifully illustrated below:
-
-<p><img src="2013-08-is-it-dead.png" alt="2013-08-is-it-dead.png" width="431"></p>
-
-<p>
-Interestingly enough, this same situation applies to hardware.
-When push comes to shove, how do we tell whether or not some
-external server has failed?
-We send messages to it periodically, and declare it failed if we
-don't receive a response within a given period of time.
-Policy decisions can usually tolerate short
-periods of inconsistency.
-The policy was decided some time ago, and is only now being put into
-effect, so a few milliseconds of delay is normally inconsequential.
-
-<p>
-However, there are algorithms that absolutely must see consistent data.
-For example, the translation between a user-level SystemV semaphore
-ID to the corresponding in-kernel data structure is protected by RCU,
-but it is absolutely forbidden to update a semaphore that has just been
-removed.
-In the Linux kernel, this need for consistency is accommodated by acquiring
-spinlocks located in the in-kernel data structure from within
-the RCU read-side critical section, and this is indicated by the
-green box in the figure above.
-Many other techniques may be used, and are in fact used within the
-Linux kernel.
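-
-<p>
-A minimal sketch of this lookup-then-lock pattern follows, in which
-<tt>sem_lookup()</tt>, the <tt>-&gt;deleted</tt> flag, and
-<tt>update_semaphore()</tt> are hypothetical stand-ins for the
-corresponding Linux-kernel SystemV-semaphore code:
-
-<blockquote>
-<pre>
- 1 rcu_read_lock();
- 2 sma = sem_lookup(id);  /* RCU-protected lookup. */
- 3 if (sma) {
- 4   spin_lock(&amp;sma-&gt;lock);  /* In-structure lock supplies consistency. */
- 5   if (sma-&gt;deleted) {  /* Removed since the lookup? */
- 6     spin_unlock(&amp;sma-&gt;lock);
- 7     sma = NULL;
- 8   }
- 9 }
-10 rcu_read_unlock();
-11 if (sma) {
-12   update_semaphore(sma);  /* Safe:  Lock held and not deleted. */
-13   spin_unlock(&amp;sma-&gt;lock);
-14 }
-</pre>
-</blockquote>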
-
-<p>
-In short, RCU is not required to maintain consistency, and other
-mechanisms may be used in concert with RCU when consistency is required.
-RCU's specialization allows it to do its job extremely well, and its
-ability to interoperate with other synchronization mechanisms allows
-the right mix of synchronization tools to be used for a given job.
-
-<h3><a name="Performance and Scalability">Performance and Scalability</a></h3>
-
-<p>
-Energy efficiency is a critical component of performance today,
-and Linux-kernel RCU implementations must therefore avoid unnecessarily
-awakening idle CPUs.
-I cannot claim that this requirement was premeditated.
-In fact, I learned of it during a telephone conversation in which I
-was given &ldquo;frank and open&rdquo; feedback on the importance
-of energy efficiency in battery-powered systems and on specific
-energy-efficiency shortcomings of the Linux-kernel RCU implementation.
-In my experience, the battery-powered embedded community will consider
-any unnecessary wakeups to be extremely unfriendly acts.
-So much so that mere Linux-kernel-mailing-list posts are
-insufficient to vent their ire.
-
-<p>
-Memory consumption is not particularly important in most
-situations, and has become decreasingly
-so as memory sizes have expanded and memory
-costs have plummeted.
-However, as I learned from Matt Mackall's
-<a href="http://elinux.org/Linux_Tiny-FAQ">bloatwatch</a>
-efforts, memory footprint is critically important on single-CPU systems with
-non-preemptible (<tt>CONFIG_PREEMPT=n</tt>) kernels, and thus
-<a href="https://lkml.kernel.org/g/20090113221724.GA15307@linux.vnet.ibm.com">tiny RCU</a>
-was born.
-Josh Triplett has since taken over the small-memory banner with his
-<a href="https://tiny.wiki.kernel.org/">Linux kernel tinification</a>
-project, which resulted in
-<a href="#Sleepable RCU">SRCU</a>
-becoming optional for those kernels not needing it.
-
-<p>
-The remaining performance requirements are, for the most part,
-unsurprising.
-For example, in keeping with RCU's read-side specialization,
-<tt>rcu_dereference()</tt> should have negligible overhead (for
-example, suppression of a few minor compiler optimizations).
-Similarly, in non-preemptible environments, <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> should have exactly zero overhead.
-
-<p>
-In preemptible environments, in the case where the RCU read-side
-critical section was not preempted (as will be the case for the
-highest-priority real-time process), <tt>rcu_read_lock()</tt> and
-<tt>rcu_read_unlock()</tt> should have minimal overhead.
-In particular, they should not contain atomic read-modify-write
-operations, memory-barrier instructions, preemption disabling,
-interrupt disabling, or backwards branches.
-However, in the case where the RCU read-side critical section was preempted,
-<tt>rcu_read_unlock()</tt> may acquire spinlocks and disable interrupts.
-This is why it is better to nest an RCU read-side critical section
-within a preempt-disable region than vice versa, at least in cases
-where that critical section is short enough to avoid unduly degrading
-real-time latencies.
-
-<p>
-The <tt>synchronize_rcu()</tt> grace-period-wait primitive is
-optimized for throughput.
-It may therefore incur several milliseconds of latency in addition to
-the duration of the longest RCU read-side critical section.
-On the other hand, multiple concurrent invocations of
-<tt>synchronize_rcu()</tt> are required to use batching optimizations
-so that they can be satisfied by a single underlying grace-period-wait
-operation.
-For example, in the Linux kernel, it is not unusual for a single
-grace-period-wait operation to serve more than
-<a href="https://www.usenix.org/conference/2004-usenix-annual-technical-conference/making-rcu-safe-deep-sub-millisecond-response">1,000 separate invocations</a>
-of <tt>synchronize_rcu()</tt>, thus amortizing the per-invocation
-overhead down to nearly zero.
-However, the grace-period optimization is also required to avoid
-measurable degradation of real-time scheduling and interrupt latencies.
-
-<p>
-In some cases, the multi-millisecond <tt>synchronize_rcu()</tt>
-latencies are unacceptable.
-In these cases, <tt>synchronize_rcu_expedited()</tt> may be used
-instead, reducing the grace-period latency down to a few tens of
-microseconds on small systems, at least in cases where the RCU read-side
-critical sections are short.
-There are currently no special latency requirements for
-<tt>synchronize_rcu_expedited()</tt> on large systems, but,
-consistent with the empirical nature of the RCU specification,
-that is subject to change.
-However, there most definitely are scalability requirements:
-A storm of <tt>synchronize_rcu_expedited()</tt> invocations on 4096
-CPUs should at least make reasonable forward progress.
-In return for its shorter latencies, <tt>synchronize_rcu_expedited()</tt>
-is permitted to impose modest degradation of real-time latency
-on non-idle online CPUs.
-That said, it will likely be necessary to take further steps to reduce this
-degradation, hopefully to roughly that of a scheduling-clock interrupt.
-
-<p>
-There are a number of situations where even
-<tt>synchronize_rcu_expedited()</tt>'s reduced grace-period
-latency is unacceptable.
-In these situations, the asynchronous <tt>call_rcu()</tt> can be
-used in place of <tt>synchronize_rcu()</tt> as follows:
-
-<blockquote>
-<pre>
- 1 struct foo {
- 2   int a;
- 3   int b;
- 4   struct rcu_head rh;
- 5 };
- 6
- 7 static void remove_gp_cb(struct rcu_head *rhp)
- 8 {
- 9   struct foo *p = container_of(rhp, struct foo, rh);
-10
-11   kfree(p);
-12 }
-13
-14 bool remove_gp_asynchronous(void)
-15 {
-16   struct foo *p;
-17
-18   spin_lock(&amp;gp_lock);
-19   p = rcu_access_pointer(gp);
-20   if (!p) {
-21     spin_unlock(&amp;gp_lock);
-22     return false;
-23   }
-24   rcu_assign_pointer(gp, NULL);
-25   call_rcu(&amp;p-&gt;rh, remove_gp_cb);
-26   spin_unlock(&amp;gp_lock);
-27   return true;
-28 }
-</pre>
-</blockquote>
-
-<p>
-A definition of <tt>struct foo</tt> is finally needed, and appears
-on lines&nbsp;1-5.
-The function <tt>remove_gp_cb()</tt> is passed to <tt>call_rcu()</tt>
-on line&nbsp;25, and will be invoked after the end of a subsequent
-grace period.
-This gets the same effect as <tt>remove_gp_synchronous()</tt>,
-but without forcing the updater to wait for a grace period to elapse.
-The <tt>call_rcu()</tt> function may be used in a number of
-situations where neither <tt>synchronize_rcu()</tt> nor
-<tt>synchronize_rcu_expedited()</tt> would be legal,
-including within preempt-disable code, <tt>local_bh_disable()</tt> code,
-interrupt-disable code, and interrupt handlers.
-However, even <tt>call_rcu()</tt> is illegal within NMI handlers.
-The callback function (<tt>remove_gp_cb()</tt> in this case) will be
-executed within softirq (software interrupt) environment within the
-Linux kernel,
-either within a real softirq handler or under the protection
-of <tt>local_bh_disable()</tt>.
-In both the Linux kernel and in userspace, it is bad practice to
-write an RCU callback function that takes too long.
-Long-running operations should be relegated to separate threads or
-(in the Linux kernel) workqueues.
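-
-<p>
-For example, a long-running cleanup might be handed off to a workqueue,
-as in the following sketch, which assumes that a
-<tt>struct work_struct</tt> field named <tt>work</tt> has been added to
-<tt>struct foo</tt> and that <tt>do_expensive_teardown()</tt> is a
-hypothetical stand-in for the long-running operation:
-
-<blockquote>
-<pre>
- 1 static void heavy_cleanup(struct work_struct *wp)
- 2 {
- 3   struct foo *p = container_of(wp, struct foo, work);
- 4
- 5   do_expensive_teardown(p);  /* Too long for softirq context. */
- 6   kfree(p);
- 7 }
- 8
- 9 static void remove_gp_deferred_cb(struct rcu_head *rhp)
-10 {
-11   struct foo *p = container_of(rhp, struct foo, rh);
-12
-13   INIT_WORK(&amp;p-&gt;work, heavy_cleanup);
-14   schedule_work(&amp;p-&gt;work);  /* Defer to process context. */
-15 }
-</pre>
-</blockquote>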
-
-<p>@@QQ@@
-Why does line&nbsp;19 use <tt>rcu_access_pointer()</tt>?
-After all, <tt>call_rcu()</tt> on line&nbsp;25 stores into the
-structure, which would interact badly with concurrent insertions.
-Doesn't this mean that <tt>rcu_dereference()</tt> is required?
-<p>@@QQA@@
-Presumably the <tt>gp_lock</tt> acquired on line&nbsp;18 excludes
-any changes, including any insertions that <tt>rcu_dereference()</tt>
-would protect against.
-Therefore, any insertions will be delayed until after <tt>gp_lock</tt>
-is released on line&nbsp;26, which in turn means that
-<tt>rcu_access_pointer()</tt> suffices.
-<p>@@QQE@@
-
-<p>
-However, all that <tt>remove_gp_cb()</tt> is doing is
-invoking <tt>kfree()</tt> on the data element.
-This is a common idiom, and is supported by <tt>kfree_rcu()</tt>,
-which allows &ldquo;fire and forget&rdquo; operation as shown below:
-
-<blockquote>
-<pre>
- 1 struct foo {
- 2   int a;
- 3   int b;
- 4   struct rcu_head rh;
- 5 };
- 6
- 7 bool remove_gp_faf(void)
- 8 {
- 9   struct foo *p;
-10
-11   spin_lock(&amp;gp_lock);
-12   p = rcu_dereference(gp);
-13   if (!p) {
-14     spin_unlock(&amp;gp_lock);
-15     return false;
-16   }
-17   rcu_assign_pointer(gp, NULL);
-18   kfree_rcu(p, rh);
-19   spin_unlock(&amp;gp_lock);
-20   return true;
-21 }
-</pre>
-</blockquote>
-
-<p>
-Note that <tt>remove_gp_faf()</tt> simply invokes
-<tt>kfree_rcu()</tt> and proceeds, without any need to pay any
-further attention to the subsequent grace period and <tt>kfree()</tt>.
-It is permissible to invoke <tt>kfree_rcu()</tt> from the same
-environments as for <tt>call_rcu()</tt>.
-Interestingly enough, DYNIX/ptx had the equivalents of
-<tt>call_rcu()</tt> and <tt>kfree_rcu()</tt>, but not
-<tt>synchronize_rcu()</tt>.
-This was due to the fact that RCU was not heavily used within DYNIX/ptx,
-so the very few places that needed something like
-<tt>synchronize_rcu()</tt> simply open-coded it.
-
-<p>@@QQ@@
-Earlier it was claimed that <tt>call_rcu()</tt> and
-<tt>kfree_rcu()</tt> allowed updaters to avoid being blocked
-by readers.
-But how can that be correct, given that the invocation of the callback
-and the freeing of the memory (respectively) must still wait for
-a grace period to elapse?
-<p>@@QQA@@
-We could define things this way, but keep in mind that this sort of
-definition would say that updates in garbage-collected languages
-cannot complete until the next time the garbage collector runs,
-which does not seem at all reasonable.
-The key point is that in most cases, an updater using either
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> can proceed to the
-next update as soon as it has invoked <tt>call_rcu()</tt> or
-<tt>kfree_rcu()</tt>, without having to wait for a subsequent
-grace period.
-<p>@@QQE@@
-
-<p>
-But what if the updater must wait for the completion of code to be
-executed after the end of the grace period, but has other tasks
-that can be carried out in the meantime?
-The polling-style <tt>get_state_synchronize_rcu()</tt> and
-<tt>cond_synchronize_rcu()</tt> functions may be used for this
-purpose, as shown below:
-
-<blockquote>
-<pre>
- 1 bool remove_gp_poll(void)
- 2 {
- 3   struct foo *p;
- 4   unsigned long s;
- 5
- 6   spin_lock(&amp;gp_lock);
- 7   p = rcu_access_pointer(gp);
- 8   if (!p) {
- 9     spin_unlock(&amp;gp_lock);
-10     return false;
-11   }
-12   rcu_assign_pointer(gp, NULL);
-13   spin_unlock(&amp;gp_lock);
-14   s = get_state_synchronize_rcu();
-15   do_something_while_waiting();
-16   cond_synchronize_rcu(s);
-17   kfree(p);
-18   return true;
-19 }
-</pre>
-</blockquote>
-
-<p>
-On line&nbsp;14, <tt>get_state_synchronize_rcu()</tt> obtains a
-&ldquo;cookie&rdquo; from RCU,
-then line&nbsp;15 carries out other tasks,
-and finally, line&nbsp;16 returns immediately if a grace period has
-elapsed in the meantime, but otherwise waits as required.
-The need for <tt>get_state_synchronize_rcu()</tt> and
-<tt>cond_synchronize_rcu()</tt> has appeared quite recently,
-so it is too early to tell whether they will stand the test of time.
-
-<p>
-RCU thus provides a range of tools to allow updaters to strike the
-required tradeoff between latency, flexibility and CPU overhead.
-
-<h3><a name="Composability">Composability</a></h3>
-
-<p>
-Composability has received much attention in recent years, perhaps in part
-due to the collision of multicore hardware with object-oriented techniques
-designed in single-threaded environments for single-threaded use.
-And in theory, RCU read-side critical sections may be composed, and in
-fact may be nested arbitrarily deeply.
-In practice, as with all real-world implementations of composable
-constructs, there are limitations.
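-
-<p>
-Composition most often takes the form of a helper function containing
-its own RCU read-side critical section being called from within an
-enclosing critical section, as in the following sketch
-(<tt>do_a_bit_of_reading()</tt> and <tt>do_more_reading()</tt> are
-hypothetical stand-ins):
-
-<blockquote>
-<pre>
- 1 void helper(void)
- 2 {
- 3   rcu_read_lock();  /* Nests harmlessly if caller is already a reader. */
- 4   do_a_bit_of_reading();
- 5   rcu_read_unlock();
- 6 }
- 7
- 8 void reader(void)
- 9 {
-10   rcu_read_lock();
-11   helper();  /* Composes:  Still one read-side critical section. */
-12   do_more_reading();
-13   rcu_read_unlock();
-14 }
-</pre>
-</blockquote>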
-
-<p>
-Implementations of RCU for which <tt>rcu_read_lock()</tt>
-and <tt>rcu_read_unlock()</tt> generate no code, such as
-Linux-kernel RCU when <tt>CONFIG_PREEMPT=n</tt>, can be
-nested arbitrarily deeply.
-After all, there is no overhead.
-Except that if all these instances of <tt>rcu_read_lock()</tt>
-and <tt>rcu_read_unlock()</tt> are visible to the compiler,
-compilation will eventually fail due to exhausting memory,
-mass storage, or user patience, whichever comes first.
-If the nesting is not visible to the compiler, as is the case with
-mutually recursive functions each in its own translation unit,
-stack overflow will result.
-If the nesting takes the form of loops, either the control variable
-will overflow or (in the Linux kernel) you will get an RCU CPU stall warning.
-Nevertheless, this class of RCU implementations is one
-of the most composable constructs in existence.
-
-<p>
-RCU implementations that explicitly track nesting depth
-are limited by the nesting-depth counter.
-For example, the Linux kernel's preemptible RCU limits nesting to
-<tt>INT_MAX</tt>.
-This should suffice for almost all practical purposes.
-That said, a consecutive pair of RCU read-side critical sections
-between which there is an operation that waits for a grace period
-cannot be enclosed in another RCU read-side critical section.
-This is because it is not legal to wait for a grace period within
-an RCU read-side critical section:  To do so would result either
-in deadlock or
-in RCU implicitly splitting the enclosing RCU read-side critical
-section, neither of which is conducive to a long-lived and prosperous
-kernel.
-
-<p>
-It is worth noting that RCU is not alone in limiting composability.
-For example, many transactional-memory implementations prohibit
-composing a pair of transactions separated by an irrevocable
-operation (for example, a network receive operation).
-For another example, lock-based critical sections can be composed
-surprisingly freely, but only if deadlock is avoided.
-
-<p>
-In short, although RCU read-side critical sections are highly composable,
-care is required in some situations, just as is the case for any other
-composable synchronization mechanism.
-
-<h3><a name="Corner Cases">Corner Cases</a></h3>
-
-<p>
-A given RCU workload might have an endless and intense stream of
-RCU read-side critical sections, perhaps even so intense that there
-is never a point in time during which there is not at least one
-RCU read-side critical section in flight.
-RCU cannot allow this situation to block grace periods:  As long as
-all the RCU read-side critical sections are finite, grace periods
-must also be finite.
-
-<p>
-That said, preemptible RCU implementations could potentially result
-in RCU read-side critical sections being preempted for long durations,
-which has the effect of creating a long-duration RCU read-side
-critical section.
-This situation can arise only in heavily loaded systems, but systems using
-real-time priorities are of course more vulnerable.
-Therefore, RCU priority boosting is provided to help deal with this
-case.
-That said, the exact requirements on RCU priority boosting will likely
-evolve as more experience accumulates.
-
-<p>
-Other workloads might have very high update rates.
-Although one can argue that such workloads should instead use
-something other than RCU, the fact remains that RCU must
-handle such workloads gracefully.
-This requirement is another factor driving batching of grace periods,
-but it is also the driving force behind the checks for large numbers
-of queued RCU callbacks in the <tt>call_rcu()</tt> code path.
-Finally, high update rates should not delay RCU read-side critical
-sections, although some read-side delays can occur when using
-<tt>synchronize_rcu_expedited()</tt>, courtesy of this function's use
-of <tt>try_stop_cpus()</tt>.
-(In the future, <tt>synchronize_rcu_expedited()</tt> will be
-converted to use lighter-weight inter-processor interrupts (IPIs),
-but this will still disturb readers, though to a much smaller degree.)
-
-<p>
-Although all three of these corner cases were understood in the early
-1990s, a simple user-level test consisting of <tt>close(open(path))</tt>
-in a tight loop
-in the early 2000s suddenly provided a much deeper appreciation of the
-high-update-rate corner case.
-This test also motivated the addition of some RCU code to react to high update
-rates: for example, if a given CPU finds itself with more than 10,000
-RCU callbacks queued, it will cause RCU to take evasive action by
-more aggressively starting grace periods and more aggressively forcing
-completion of grace-period processing.
-This evasive action causes the grace period to complete more quickly,
-but at the cost of restricting RCU's batching optimizations, thus
-increasing the CPU overhead incurred by that grace period.
-
-<h2><a name="Software-Engineering Requirements">
-Software-Engineering Requirements</a></h2>
-
-<p>
-Between Murphy's Law and &ldquo;To err is human&rdquo;, it is necessary to
-guard against mishaps and misuse:
-
-<ol>
-<li>   It is all too easy to forget to use <tt>rcu_read_lock()</tt>
-       everywhere that it is needed, so kernels built with
-       <tt>CONFIG_PROVE_RCU=y</tt> will splat if
-       <tt>rcu_dereference()</tt> is used outside of an
-       RCU read-side critical section.
-       Update-side code can use <tt>rcu_dereference_protected()</tt>,
-       which takes a
-       <a href="https://lwn.net/Articles/371986/">lockdep expression</a>
-       to indicate what is providing the protection.
-       If the indicated protection is not provided, a lockdep splat
-       is emitted.
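-       (A brief usage sketch appears just after this list.)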
-
-       <p>
-       Code shared between readers and updaters can use
-       <tt>rcu_dereference_check()</tt>, which also takes a
-       lockdep expression, and emits a lockdep splat if neither
-       <tt>rcu_read_lock()</tt> nor the indicated protection
-       is in place.
-       In addition, <tt>rcu_dereference_raw()</tt> is used in those
-       (hopefully rare) cases where the required protection cannot
-       be easily described.
-       Finally, <tt>rcu_read_lock_held()</tt> is provided to
-       allow a function to verify that it has been invoked within
-       an RCU read-side critical section.
-       I was made aware of this set of requirements shortly after Thomas
-       Gleixner audited a number of RCU uses.
-<li>   A given function might wish to check for RCU-related preconditions
-       upon entry, before using any other RCU API.
-       The <tt>rcu_lockdep_assert()</tt> macro does this job,
-       asserting the expression in kernels having lockdep enabled
-       and doing nothing otherwise.
-<li>   It is also easy to forget to use <tt>rcu_assign_pointer()</tt>
-       and <tt>rcu_dereference()</tt>, perhaps (incorrectly)
-       substituting a simple assignment.
-       To catch this sort of error, a given RCU-protected pointer may be
-       tagged with <tt>__rcu</tt>, after which running sparse
-       with <tt>CONFIG_SPARSE_RCU_POINTER=y</tt> will complain
-       about simple-assignment accesses to that pointer.
-       Arnd Bergmann made me aware of this requirement, and also
-       supplied the needed
-       <a href="https://lwn.net/Articles/376011/">patch series</a>.
-<li>   Kernels built with <tt>CONFIG_DEBUG_OBJECTS_RCU_HEAD=y</tt>
-       will splat if a data element is passed to <tt>call_rcu()</tt>
-       twice in a row, without a grace period in between.
-       (This error is similar to a double free.)
-       The corresponding <tt>rcu_head</tt> structures that are
-       dynamically allocated are automatically tracked, but
-       <tt>rcu_head</tt> structures allocated on the stack
-       must be initialized with <tt>init_rcu_head_on_stack()</tt>
-       and cleaned up with <tt>destroy_rcu_head_on_stack()</tt>.
-       Similarly, statically allocated non-stack <tt>rcu_head</tt>
-       structures must be initialized with <tt>init_rcu_head()</tt>
-       and cleaned up with <tt>destroy_rcu_head()</tt>.
-       Mathieu Desnoyers made me aware of this requirement, and also
-       supplied the needed
-       <a href="https://lkml.kernel.org/g/20100319013024.GA28456@Krystal">patch</a>.
-<li>   An infinite loop in an RCU read-side critical section will
-       eventually trigger an RCU CPU stall warning splat, with
-       the duration of &ldquo;eventually&rdquo; being controlled by the
-       <tt>RCU_CPU_STALL_TIMEOUT</tt> <tt>Kconfig</tt> option, or,
-       alternatively, by the
-       <tt>rcupdate.rcu_cpu_stall_timeout</tt> boot/sysfs
-       parameter.
-       However, RCU is not obligated to produce this splat
-       unless there is a grace period waiting on that particular
-       RCU read-side critical section.
-       <p>
-       Some extreme workloads might intentionally delay
-       RCU grace periods, and systems running those workloads can
-       be booted with <tt>rcupdate.rcu_cpu_stall_suppress</tt>
-       to suppress the splats.
-       This kernel parameter may also be set via <tt>sysfs</tt>.
-       Furthermore, RCU CPU stall warnings are counter-productive
-       during sysrq dumps and during panics.
-       RCU therefore supplies the <tt>rcu_sysrq_start()</tt> and
-       <tt>rcu_sysrq_end()</tt> API members to be called before
-       and after long sysrq dumps.
-       RCU also supplies the <tt>rcu_panic()</tt> notifier that is
-       automatically invoked at the beginning of a panic to suppress
-       further RCU CPU stall warnings.
-
-       <p>
-       This requirement made itself known in the early 1990s, pretty
-       much the first time that it was necessary to debug a CPU stall.
-       That said, the initial implementation in DYNIX/ptx was quite
-       generic in comparison with that of Linux.
-<li>   Although it would be very good to detect pointers leaking out
-       of RCU read-side critical sections, there is currently no
-       good way of doing this.
-       One complication is the need to distinguish between pointers
-       leaking and pointers that have been handed off from RCU to
-       some other synchronization mechanism, for example, reference
-       counting.
-<li>   In kernels built with <tt>CONFIG_RCU_TRACE=y</tt>, RCU-related
-       information is provided via both debugfs and event tracing.
-<li>   Open-coded use of <tt>rcu_assign_pointer()</tt> and
-       <tt>rcu_dereference()</tt> to create typical linked
-       data structures can be surprisingly error-prone.
-       Therefore, RCU-protected
-       <a href="https://lwn.net/Articles/609973/#RCU List APIs">linked lists</a>
-       and, more recently, RCU-protected
-       <a href="https://lwn.net/Articles/612100/">hash tables</a>
-       are available.
-       Many other special-purpose RCU-protected data structures are
-       available in the Linux kernel and the userspace RCU library.
-<li>   Some linked structures are created at compile time, but still
-       require <tt>__rcu</tt> checking.
-       The <tt>RCU_POINTER_INITIALIZER()</tt> macro serves this
-       purpose.
-<li>   It is not necessary to use <tt>rcu_assign_pointer()</tt>
-       when creating linked structures that are to be published via
-       a single external pointer.
-       The <tt>RCU_INIT_POINTER()</tt> macro is provided for
-       this task and also for assigning <tt>NULL</tt> pointers
-       at runtime.
-</ol>
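-
-<p>
-To illustrate the first and third items above, the following sketch
-(using a hypothetical <tt>foo</tt> structure, pointer, and lock rather
-than any particular kernel subsystem) shows how <tt>__rcu</tt> tagging
-and the lockdep-checked accessors might fit together:
-
-<blockquote>
-<pre>
-struct foo {
-  int a;
-};
-static struct foo __rcu *global_foo;  /* checked by sparse */
-static DEFINE_SPINLOCK(foo_lock);     /* protects updates to global_foo */
-
-/* Reader: must run within an RCU read-side critical section. */
-static int foo_get_a(void)
-{
-  struct foo *p;
-
-  WARN_ON_ONCE(!rcu_read_lock_held());
-  p = rcu_dereference(global_foo);
-  return p ? p-&gt;a : -1;
-}
-
-/* Shared between readers and updaters: either protection suffices. */
-static struct foo *foo_peek(void)
-{
-  return rcu_dereference_check(global_foo, lockdep_is_held(&amp;foo_lock));
-}
-
-/* Updater: foo_lock provides the protection. */
-static void foo_set(struct foo *newp)
-{
-  spin_lock(&amp;foo_lock);
-  rcu_assign_pointer(global_foo, newp);
-  spin_unlock(&amp;foo_lock);
-}
-</pre>
-</blockquote>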
-
-<p>
-This is not a hard-and-fast list:  RCU's diagnostic capabilities will
-continue to be guided by the number and type of usage bugs found
-in real-world RCU usage.
-
-<h2><a name="Linux Kernel Complications">Linux Kernel Complications</a></h2>
-
-<p>
-The Linux kernel provides an interesting environment for all kinds of
-software, including RCU.
-Some of the relevant points of interest are as follows:
-
-<ol>
-<li>   <a href="#Configuration">Configuration</a>.
-<li>   <a href="#Firmware Interface">Firmware Interface</a>.
-<li>   <a href="#Early Boot">Early Boot</a>.
-<li>   <a href="#Interrupts and NMIs">
-       Interrupts and non-maskable interrupts (NMIs)</a>.
-<li>   <a href="#Loadable Modules">Loadable Modules</a>.
-<li>   <a href="#Hotplug CPU">Hotplug CPU</a>.
-<li>   <a href="#Scheduler and RCU">Scheduler and RCU</a>.
-<li>   <a href="#Tracing and RCU">Tracing and RCU</a>.
-<li>   <a href="#Energy Efficiency">Energy Efficiency</a>.
-<li>   <a href="#Memory Efficiency">Memory Efficiency</a>.
-<li>   <a href="#Performance, Scalability, Response Time, and Reliability">
-       Performance, Scalability, Response Time, and Reliability</a>.
-</ol>
-
-<p>
-This list is probably incomplete, but it does give a feel for the
-most notable Linux-kernel complications.
-Each of the following sections covers one of the above topics.
-
-<h3><a name="Configuration">Configuration</a></h3>
-
-<p>
-RCU's goal is automatic configuration, so that almost nobody
-needs to worry about RCU's <tt>Kconfig</tt> options.
-And for almost all users, RCU does in fact work well
-&ldquo;out of the box.&rdquo;
-
-<p>
-However, there are specialized use cases that are handled by
-kernel boot parameters and <tt>Kconfig</tt> options.
-Unfortunately, the <tt>Kconfig</tt> system will explicitly ask users
-about new <tt>Kconfig</tt> options, which requires that almost all of them
-be hidden behind a <tt>CONFIG_RCU_EXPERT</tt> <tt>Kconfig</tt> option.
-
-<p>
-This all should be quite obvious, but the fact remains that
-Linus Torvalds recently had to
-<a href="https://lkml.kernel.org/g/CA+55aFy4wcCwaL4okTs8wXhGZ5h-ibecy_Meg9C4MNQrUnwMcg@mail.gmail.com">remind</a>
-me of this requirement.
-
-<h3><a name="Firmware Interface">Firmware Interface</a></h3>
-
-<p>
-In many cases, the kernel obtains information about the system from the
-firmware, and sometimes things are lost in translation.
-Or the translation is accurate, but the original message is bogus.
-
-<p>
-For example, some systems' firmware overreports the number of CPUs,
-sometimes by a large factor.
-If RCU naively believed the firmware, as it used to do,
-it would create too many per-CPU kthreads.
-Although the resulting system will still run correctly, the extra
-kthreads needlessly consume memory and can cause confusion
-when they show up in <tt>ps</tt> listings.
-
-<p>
-RCU must therefore wait for a given CPU to actually come online before
-it can allow itself to believe that the CPU actually exists.
-The resulting &ldquo;ghost CPUs&rdquo; (which are never going to
-come online) cause a number of
-<a href="https://paulmck.livejournal.com/37494.html">interesting complications</a>.
-
-<h3><a name="Early Boot">Early Boot</a></h3>
-
-<p>
-The Linux kernel's boot sequence is an interesting process,
-and RCU is used early, even before <tt>rcu_init()</tt>
-is invoked.
-In fact, a number of RCU's primitives can be used as soon as the
-initial task's <tt>task_struct</tt> is available and the
-boot CPU's per-CPU variables are set up.
-The read-side primitives (<tt>rcu_read_lock()</tt>,
-<tt>rcu_read_unlock()</tt>, <tt>rcu_dereference()</tt>,
-and <tt>rcu_access_pointer()</tt>) will operate normally very early on,
-as will <tt>rcu_assign_pointer()</tt>.
-
-<p>
-Although <tt>call_rcu()</tt> may be invoked at any
-time during boot, callbacks are not guaranteed to be invoked until after
-the scheduler is fully up and running.
-This delay in callback invocation is due to the fact that RCU does not
-invoke callbacks until it is fully initialized, and this full initialization
-cannot occur until after the scheduler has initialized itself to the
-point where RCU can spawn and run its kthreads.
-In theory, it would be possible to invoke callbacks earlier;
-however, this is not a panacea because there would be severe restrictions
-on what operations those callbacks could invoke.
-
-<p>
-Perhaps surprisingly, <tt>synchronize_rcu()</tt>,
-<a href="#Bottom-Half Flavor"><tt>synchronize_rcu_bh()</tt></a>
-(<a href="#Bottom-Half Flavor">discussed below</a>),
-and
-<a href="#Sched Flavor"><tt>synchronize_sched()</tt></a>
-will all operate normally
-during very early boot, the reason being that there is only one CPU
-and preemption is disabled.
-This means that a call to <tt>synchronize_rcu()</tt> (or friends)
-is itself a quiescent
-state and thus a grace period, so the early-boot implementation can
-be a no-op.
-
-<p>
-Both <tt>synchronize_rcu_bh()</tt> and <tt>synchronize_sched()</tt>
-continue to operate normally through the remainder of boot, courtesy
-of the fact that preemption is disabled across their RCU read-side
-critical sections and also courtesy of the fact that there is still
-only one CPU.
-However, once the scheduler starts initializing, preemption is enabled.
-There is still only a single CPU, but the fact that preemption is enabled
-means that the no-op implementation of <tt>synchronize_rcu()</tt> no
-longer works in <tt>CONFIG_PREEMPT=y</tt> kernels.
-Therefore, as soon as the scheduler starts initializing, the early-boot
-fastpath is disabled.
-This means that <tt>synchronize_rcu()</tt> switches to its runtime
-mode of operation where it posts callbacks, which in turn means that
-any call to <tt>synchronize_rcu()</tt> will block until the corresponding
-callback is invoked.
-Unfortunately, the callback cannot be invoked until RCU's runtime
-grace-period machinery is up and running, which cannot happen until
-the scheduler has initialized itself sufficiently to allow RCU's
-kthreads to be spawned.
-Therefore, invoking <tt>synchronize_rcu()</tt> during scheduler
-initialization can result in deadlock.
-
-<p>@@QQ@@
-So what happens with <tt>synchronize_rcu()</tt> during
-scheduler initialization for <tt>CONFIG_PREEMPT=n</tt>
-kernels?
-<p>@@QQA@@
-In <tt>CONFIG_PREEMPT=n</tt> kernels, <tt>synchronize_rcu()</tt>
-maps directly to <tt>synchronize_sched()</tt>.
-Therefore, <tt>synchronize_rcu()</tt> works normally throughout
-boot in <tt>CONFIG_PREEMPT=n</tt> kernels.
-However, your code must also work in <tt>CONFIG_PREEMPT=y</tt> kernels,
-so it is still necessary to avoid invoking <tt>synchronize_rcu()</tt>
-during scheduler initialization.
-<p>@@QQE@@
-
-<p>
-I learned of these boot-time requirements as a result of a series of
-system hangs.
-
-<h3><a name="Interrupts and NMIs">Interrupts and NMIs</a></h3>
-
-<p>
-The Linux kernel has interrupts, and RCU read-side critical sections are
-legal within interrupt handlers and within interrupt-disabled regions
-of code, as are invocations of <tt>call_rcu()</tt>.
-
-<p>
-Some Linux-kernel architectures can enter an interrupt handler from
-non-idle process context, and then just never leave it, instead stealthily
-transitioning back to process context.
-This trick is sometimes used to invoke system calls from inside the kernel.
-These &ldquo;half-interrupts&rdquo; mean that RCU has to be very careful
-about how it counts interrupt nesting levels.
-I learned of this requirement the hard way during a rewrite
-of RCU's dyntick-idle code.
-
-<p>
-The Linux kernel has non-maskable interrupts (NMIs), and
-RCU read-side critical sections are legal within NMI handlers.
-Thankfully, RCU update-side primitives, including
-<tt>call_rcu()</tt>, are prohibited within NMI handlers.
-
-<p>
-The name notwithstanding, some Linux-kernel architectures
-can have nested NMIs, which RCU must handle correctly.
-Andy Lutomirski
-<a href="https://lkml.kernel.org/g/CALCETrXLq1y7e_dKFPgou-FKHB6Pu-r8+t-6Ds+8=va7anBWDA@mail.gmail.com">surprised me</a>
-with this requirement;
-he also kindly surprised me with
-<a href="https://lkml.kernel.org/g/CALCETrXSY9JpW3uE6H8WYk81sg56qasA2aqmjMPsq5dOtzso=g@mail.gmail.com">an algorithm</a>
-that meets this requirement.
-
-<h3><a name="Loadable Modules">Loadable Modules</a></h3>
-
-<p>
-The Linux kernel has loadable modules, and these modules can
-also be unloaded.
-After a given module has been unloaded, any attempt to call
-one of its functions results in a segmentation fault.
-The module-unload functions must therefore cancel any
-delayed calls to loadable-module functions, for example,
-any outstanding <tt>mod_timer()</tt> must be dealt with
-via <tt>del_timer_sync()</tt> or similar.
-
-<p>
-Unfortunately, there is no way to cancel an RCU callback;
-once you invoke <tt>call_rcu()</tt>, the callback function is
-going to eventually be invoked, unless the system goes down first.
-Because it is normally considered socially irresponsible to crash the system
-in response to a module unload request, we need some other way
-to deal with in-flight RCU callbacks.
-
-<p>
-RCU therefore provides
-<tt><a href="https://lwn.net/Articles/217484/">rcu_barrier()</a></tt>,
-which waits until all in-flight RCU callbacks have been invoked.
-If a module uses <tt>call_rcu()</tt>, its exit function should therefore
-prevent any future invocation of <tt>call_rcu()</tt>, then invoke
-<tt>rcu_barrier()</tt>.
-In theory, the underlying module-unload code could invoke
-<tt>rcu_barrier()</tt> unconditionally, but in practice this would
-incur unacceptable latencies.
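-
-<p>
-A module's exit function might therefore look something like the
-following sketch, in which the <tt>foo</tt> names are hypothetical:
-
-<blockquote>
-<pre>
-static void __exit foo_exit(void)
-{
-  /* First, prevent any new call_rcu() invocations... */
-  foo_unregister_everything();  /* hypothetical */
-
-  /* ...then wait for all previously posted callbacks to be invoked. */
-  rcu_barrier();
-
-  /* Only now is it safe for the module text and data to vanish. */
-}
-module_exit(foo_exit);
-</pre>
-</blockquote>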
-
-<p>
-Nikita Danilov noted this requirement for an analogous filesystem-unmount
-situation, and Dipankar Sarma incorporated <tt>rcu_barrier()</tt> into RCU.
-The need for <tt>rcu_barrier()</tt> for module unloading became
-apparent later.
-
-<h3><a name="Hotplug CPU">Hotplug CPU</a></h3>
-
-<p>
-The Linux kernel supports CPU hotplug, which means that CPUs
-can come and go.
-It is of course illegal to use any RCU API member from an offline CPU.
-This requirement was present from day one in DYNIX/ptx, but
-on the other hand, the Linux kernel's CPU-hotplug implementation
-is &ldquo;interesting.&rdquo;
-
-<p>
-The Linux-kernel CPU-hotplug implementation has notifiers that
-are used to allow the various kernel subsystems (including RCU)
-to respond appropriately to a given CPU-hotplug operation.
-Most RCU operations may be invoked from CPU-hotplug notifiers,
-including even normal synchronous grace-period operations
-such as <tt>synchronize_rcu()</tt>.
-However, expedited grace-period operations such as
-<tt>synchronize_rcu_expedited()</tt> are not supported,
-due to the fact that current implementations block CPU-hotplug
-operations, which could result in deadlock.
-
-<p>
-In addition, all-callback-wait operations such as
-<tt>rcu_barrier()</tt> are also not supported, due to the
-fact that there are phases of CPU-hotplug operations where
-the outgoing CPU's callbacks will not be invoked until after
-the CPU-hotplug operation ends, which could also result in deadlock.
-
-<h3><a name="Scheduler and RCU">Scheduler and RCU</a></h3>
-
-<p>
-RCU depends on the scheduler, and the scheduler uses RCU to
-protect some of its data structures.
-This means the scheduler is forbidden from acquiring
-the runqueue locks and the priority-inheritance locks
-in the middle of an outermost RCU read-side critical section unless either
-(1)&nbsp;it releases them before exiting that same
-RCU read-side critical section, or
-(2)&nbsp;interrupts are disabled across
-that entire RCU read-side critical section.
-This same prohibition also applies (recursively!) to any lock that is acquired
-while holding any lock to which this prohibition applies.
-Adhering to this rule prevents preemptible RCU from invoking
-<tt>rcu_read_unlock_special()</tt> while either runqueue or
-priority-inheritance locks are held, thus avoiding deadlock.
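-
-<p>
-For example, option&nbsp;(2) above might look as follows in a purely
-illustrative sketch (the <tt>rq</tt> structure and its lock are
-hypothetical stand-ins here, not actual scheduler code):
-
-<blockquote>
-<pre>
-static void sched_side_example(struct rq *rq)
-{
-  unsigned long flags;
-
-  local_irq_save(flags);   /* IRQs disabled across the entire   */
-  rcu_read_lock();         /* RCU read-side critical section... */
-  raw_spin_lock(&amp;rq-&gt;lock);
-  /* ...so acquiring the runqueue lock here is permitted. */
-  raw_spin_unlock(&amp;rq-&gt;lock);
-  rcu_read_unlock();
-  local_irq_restore(flags);
-}
-</pre>
-</blockquote>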
-
-<p>
-Prior to v4.4, it was only necessary to disable preemption across
-RCU read-side critical sections that acquired scheduler locks.
-In v4.4, expedited grace periods started using IPIs, and these
-IPIs could force a <tt>rcu_read_unlock()</tt> to take the slowpath.
-Therefore, this expedited-grace-period change required disabling of
-interrupts, not just preemption.
-
-<p>
-For RCU's part, the preemptible-RCU <tt>rcu_read_unlock()</tt>
-implementation must be written carefully to avoid similar deadlocks.
-In particular, <tt>rcu_read_unlock()</tt> must tolerate an
-interrupt where the interrupt handler invokes both
-<tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>.
-This possibility requires <tt>rcu_read_unlock()</tt> to use
-negative nesting levels to avoid destructive recursion via
-an interrupt handler's use of RCU.
-
-<p>
-This pair of mutual scheduler-RCU requirements came as a
-<a href="https://lwn.net/Articles/453002/">complete surprise</a>.
-
-<p>
-As noted above, RCU makes use of kthreads, and it is necessary to
-avoid excessive CPU-time accumulation by these kthreads.
-This requirement was no surprise, but RCU's violation of it
-when running context-switch-heavy workloads when built with
-<tt>CONFIG_NO_HZ_FULL=y</tt>
-<a href="http://www.rdrop.com/users/paulmck/scalability/paper/BareMetal.2015.01.15b.pdf">did come as a surprise [PDF]</a>.
-RCU has made good progress towards meeting this requirement, even
-for context-switch-heavy <tt>CONFIG_NO_HZ_FULL=y</tt> workloads,
-but there is room for further improvement.
-
-<h3><a name="Tracing and RCU">Tracing and RCU</a></h3>
-
-<p>
-It is possible to use tracing on RCU code, but tracing itself
-uses RCU.
-For this reason, <tt>rcu_dereference_raw_notrace()</tt>
-is provided for use by tracing, which avoids the destructive
-recursion that could otherwise ensue.
-This API is also used by virtualization in some architectures,
-where RCU readers execute in environments in which tracing
-cannot be used.
-The tracing folks both located the requirement and provided the
-needed fix, so this surprise requirement was relatively painless.
-
-<h3><a name="Energy Efficiency">Energy Efficiency</a></h3>
-
-<p>
-Interrupting idle CPUs is considered socially unacceptable,
-especially by people with battery-powered embedded systems.
-RCU therefore conserves energy by detecting which CPUs are
-idle, including tracking CPUs that have been interrupted from idle.
-This is a large part of the energy-efficiency requirement,
-so I learned of this via an irate phone call.
-
-<p>
-Because RCU avoids interrupting idle CPUs, it is illegal to
-execute an RCU read-side critical section on an idle CPU.
-(Kernels built with <tt>CONFIG_PROVE_RCU=y</tt> will splat
-if you try it.)
-The <tt>RCU_NONIDLE()</tt> macro and <tt>_rcuidle</tt>
-event tracing are provided to work around this restriction.
-In addition, <tt>rcu_is_watching()</tt> may be used to
-test whether or not it is currently legal to run RCU read-side
-critical sections on this CPU.
-I learned of the need for diagnostics on the one hand
-and <tt>RCU_NONIDLE()</tt> on the other while inspecting
-idle-loop code.
-Steven Rostedt supplied <tt>_rcuidle</tt> event tracing,
-which is used quite heavily in the idle loop.
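-
-<p>
-A minimal sketch, assuming a hypothetical <tt>do_trace_from_idle()</tt>
-helper that contains an RCU read-side critical section:
-
-<blockquote>
-<pre>
-static void idle_loop_fragment(void)
-{
-  /*
-   * rcu_is_watching() says whether RCU read-side critical
-   * sections are currently legal on this CPU.
-   */
-  if (rcu_is_watching())
-    do_trace_from_idle();
-  else
-    RCU_NONIDLE(do_trace_from_idle()); /* RCU watches this statement. */
-}
-</pre>
-</blockquote>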
-
-<p>
-It is similarly socially unacceptable to interrupt an
-<tt>nohz_full</tt> CPU running in userspace.
-RCU must therefore track <tt>nohz_full</tt> userspace
-execution.
-And in
-<a href="https://lwn.net/Articles/558284/"><tt>CONFIG_NO_HZ_FULL_SYSIDLE=y</tt></a>
-kernels, RCU must separately track idle CPUs on the one hand and
-CPUs that are either idle or executing in userspace on the other.
-In both cases, RCU must be able to sample state at two points in
-time, and be able to determine whether or not some other CPU spent
-any time idle and/or executing in userspace.
-
-<p>
-These energy-efficiency requirements have proven quite difficult to
-understand and to meet: for example, there have been more than five
-clean-sheet rewrites of RCU's energy-efficiency code, the last of
-which was finally able to demonstrate
-<a href="http://www.rdrop.com/users/paulmck/realtime/paper/AMPenergy.2013.04.19a.pdf">real energy savings running on real hardware [PDF]</a>.
-As noted earlier,
-I learned of many of these requirements via angry phone calls:
-Flaming me on the Linux-kernel mailing list was apparently not
-sufficient to fully vent their ire at RCU's energy-efficiency bugs!
-
-<h3><a name="Memory Efficiency">Memory Efficiency</a></h3>
-
-<p>
-Although small-memory non-realtime systems can simply use Tiny RCU,
-code size is only one aspect of memory efficiency.
-Another aspect is the size of the <tt>rcu_head</tt> structure
-used by <tt>call_rcu()</tt> and <tt>kfree_rcu()</tt>.
-Although this structure contains nothing more than a pair of pointers,
-it does appear in many RCU-protected data structures, including
-some that are size critical.
-The <tt>page</tt> structure is a case in point, as evidenced by
-the many occurrences of the <tt>union</tt> keyword within that structure.
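-
-<p>
-For reference, an RCU-protected structure typically embeds its
-<tt>rcu_head</tt> as follows (a generic sketch, not the
-<tt>page</tt> structure itself):
-
-<blockquote>
-<pre>
-struct foo {
-  struct list_head list;
-  int data;
-  struct rcu_head rh;  /* just two pointers' worth of space */
-};
-
-static void free_foo_cb(struct rcu_head *rhp)
-{
-  struct foo *fp = container_of(rhp, struct foo, rh);
-
-  kfree(fp);
-}
-
-static void retire_foo(struct foo *fp)
-{
-  call_rcu(&amp;fp-&gt;rh, free_foo_cb); /* or simply kfree_rcu(fp, rh) */
-}
-</pre>
-</blockquote>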
-
-<p>
-This need for memory efficiency is one reason that RCU uses hand-crafted
-singly linked lists to track the <tt>rcu_head</tt> structures that
-are waiting for a grace period to elapse.
-It is also the reason why <tt>rcu_head</tt> structures do not contain
-debug information, such as fields tracking the file and line of the
-<tt>call_rcu()</tt> or <tt>kfree_rcu()</tt> that posted them.
-Although this information might appear in debug-only kernel builds at some
-point, in the meantime, the <tt>-&gt;func</tt> field will often provide
-the needed debug information.
-
-<p>
-However, in some cases, the need for memory efficiency leads to even
-more extreme measures.
-Returning to the <tt>page</tt> structure, the <tt>rcu_head</tt> field
-shares storage with a great many other structures that are used at
-various points in the corresponding page's lifetime.
-In order to correctly resolve certain
-<a href="https://lkml.kernel.org/g/1439976106-137226-1-git-send-email-kirill.shutemov@linux.intel.com">race conditions</a>,
-the Linux kernel's memory-management subsystem needs a particular bit
-to remain zero during all phases of grace-period processing,
-and that bit happens to map to the bottom bit of the
-<tt>rcu_head</tt> structure's <tt>-&gt;next</tt> field.
-RCU makes this guarantee as long as <tt>call_rcu()</tt>
-is used to post the callback, as opposed to <tt>kfree_rcu()</tt>
-or some future &ldquo;lazy&rdquo;
-variant of <tt>call_rcu()</tt> that might one day be created for
-energy-efficiency purposes.
-
-<h3><a name="Performance, Scalability, Response Time, and Reliability">
-Performance, Scalability, Response Time, and Reliability</a></h3>
-
-<p>
-Expanding on the
-<a href="#Performance and Scalability">earlier discussion</a>,
-RCU is used heavily by hot code paths in performance-critical
-portions of the Linux kernel's networking, security, virtualization,
-and scheduling code paths.
-RCU must therefore use efficient implementations, especially in its
-read-side primitives.
-To that end, it would be good if preemptible RCU's implementation
-of <tt>rcu_read_lock()</tt> could be inlined; however, doing
-this requires resolving <tt>#include</tt> issues with the
-<tt>task_struct</tt> structure.
-
-<p>
-The Linux kernel supports hardware configurations with up to
-4096 CPUs, which means that RCU must be extremely scalable.
-Algorithms that involve frequent acquisitions of global locks or
-frequent atomic operations on global variables simply cannot be
-tolerated within the RCU implementation.
-RCU therefore makes heavy use of a combining tree based on the
-<tt>rcu_node</tt> structure.
-RCU is required to tolerate all CPUs continuously invoking any
-combination of RCU's runtime primitives with minimal per-operation
-overhead.
-In fact, in many cases, increasing load must <i>decrease</i> the
-per-operation overhead; witness the batching optimizations for
-<tt>synchronize_rcu()</tt>, <tt>call_rcu()</tt>,
-<tt>synchronize_rcu_expedited()</tt>, and <tt>rcu_barrier()</tt>.
-As a general rule, RCU must cheerfully accept whatever the
-rest of the Linux kernel decides to throw at it.
-
-<p>
-The Linux kernel is used for real-time workloads, especially
-in conjunction with the
-<a href="https://rt.wiki.kernel.org/index.php/Main_Page">-rt patchset</a>.
-The real-time-latency response requirements are such that the
-traditional approach of disabling preemption across RCU
-read-side critical sections is inappropriate.
-Kernels built with <tt>CONFIG_PREEMPT=y</tt> therefore
-use an RCU implementation that allows RCU read-side critical
-sections to be preempted.
-This requirement made its presence known after users made it
-clear that an earlier
-<a href="https://lwn.net/Articles/107930/">real-time patch</a>
-did not meet their needs, in conjunction with some
-<a href="https://lkml.kernel.org/g/20050318002026.GA2693@us.ibm.com">RCU issues</a>
-encountered by a very early version of the -rt patchset.
-
-<p>
-In addition, RCU must make do with a sub-100-microsecond real-time latency
-budget.
-In fact, on smaller systems with the -rt patchset, the Linux kernel
-provides sub-20-microsecond real-time latencies for the whole kernel,
-including RCU.
-RCU's scalability and latency must therefore be sufficient for
-these sorts of configurations.
-To my surprise, the sub-100-microsecond real-time latency budget
-<a href="http://www.rdrop.com/users/paulmck/realtime/paper/bigrt.2013.01.31a.LCA.pdf">
-applies to even the largest systems [PDF]</a>,
-up to and including systems with 4096 CPUs.
-This real-time requirement motivated the grace-period kthread, which
-also simplified handling of a number of race conditions.
-
-<p>
-Finally, RCU's status as a synchronization primitive means that
-any RCU failure can result in arbitrary memory corruption that can be
-extremely difficult to debug.
-This means that RCU must be extremely reliable, which in
-practice also means that RCU must have an aggressive stress-test
-suite.
-This stress-test suite is called <tt>rcutorture</tt>.
-
-<p>
-Although the need for <tt>rcutorture</tt> was no surprise,
-the current immense popularity of the Linux kernel is posing
-interesting&mdash;and perhaps unprecedented&mdash;validation
-challenges.
-To see this, keep in mind that there are well over one billion
-instances of the Linux kernel running today, given Android
-smartphones, Linux-powered televisions, and servers.
-This number can be expected to increase sharply with the advent of
-the celebrated Internet of Things.
-
-<p>
-Suppose that RCU contains a race condition that manifests on average
-once per million years of runtime.
-This bug will be occurring about three times per <i>day</i> across
-the installed base.
-RCU could simply hide behind hardware error rates, given that no one
-should really expect their smartphone to last for a million years.
-However, anyone taking too much comfort from this thought should
-consider the fact that in most jurisdictions, a successful multi-year
-test of a given mechanism, which might include a Linux kernel,
-suffices for a number of types of safety-critical certifications.
-In fact, rumor has it that the Linux kernel is already being used
-in production for safety-critical applications.
-I don't know about you, but I would feel quite bad if a bug in RCU
-killed someone.
-Which might explain my recent focus on validation and verification.
-
-<h2><a name="Other RCU Flavors">Other RCU Flavors</a></h2>
-
-<p>
-One of the more surprising things about RCU is that there are now
-no fewer than five <i>flavors</i>, or API families.
-In addition, the primary flavor that has been the sole focus up to
-this point has two different implementations, non-preemptible and
-preemptible.
-The other four flavors are listed below, with requirements for each
-described in a separate section.
-
-<ol>
-<li>   <a href="#Bottom-Half Flavor">Bottom-Half Flavor</a>
-<li>   <a href="#Sched Flavor">Sched Flavor</a>
-<li>   <a href="#Sleepable RCU">Sleepable RCU</a>
-<li>   <a href="#Tasks RCU">Tasks RCU</a>
-</ol>
-
-<h3><a name="Bottom-Half Flavor">Bottom-Half Flavor</a></h3>
-
-<p>
-The softirq-disable (AKA &ldquo;bottom-half&rdquo;,
-hence the &ldquo;_bh&rdquo; abbreviations)
-flavor of RCU, or <i>RCU-bh</i>, was developed by
-Dipankar Sarma to provide a flavor of RCU that could withstand the
-network-based denial-of-service attacks researched by Robert
-Olsson.
-These attacks placed so much networking load on the system
-that some of the CPUs never exited softirq execution,
-which in turn prevented those CPUs from ever executing a context switch,
-which, in the RCU implementation of that time, prevented grace periods
-from ever ending.
-The result was an out-of-memory condition and a system hang.
-
-<p>
-The solution was the creation of RCU-bh, which does
-<tt>local_bh_disable()</tt>
-across its read-side critical sections, and which uses the transition
-from one type of softirq processing to another as a quiescent state
-in addition to context switch, idle, user mode, and offline.
-This means that RCU-bh grace periods can complete even when some of
-the CPUs execute in softirq indefinitely, thus allowing algorithms
-based on RCU-bh to withstand network-based denial-of-service attacks.
-
-<p>
-Because
-<tt>rcu_read_lock_bh()</tt> and <tt>rcu_read_unlock_bh()</tt>
-disable and re-enable softirq handlers, any attempt to start a softirq
-handler during the
-RCU-bh read-side critical section will be deferred.
-In this case, <tt>rcu_read_unlock_bh()</tt>
-will invoke softirq processing, which can take considerable time.
-One can of course argue that this softirq overhead should be associated
-with the code following the RCU-bh read-side critical section rather
-than <tt>rcu_read_unlock_bh()</tt>, but the fact
-is that most profiling tools cannot be expected to make this sort
-of fine distinction.
-For example, suppose that a three-millisecond-long RCU-bh read-side
-critical section executes during a time of heavy networking load.
-There will very likely be an attempt to invoke at least one softirq
-handler during that three milliseconds, but any such invocation will
-be delayed until the time of the <tt>rcu_read_unlock_bh()</tt>.
-This can of course make it appear at first glance as if
-<tt>rcu_read_unlock_bh()</tt> was executing very slowly.
-
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">RCU-bh API</a>
-includes
-<tt>rcu_read_lock_bh()</tt>,
-<tt>rcu_read_unlock_bh()</tt>,
-<tt>rcu_dereference_bh()</tt>,
-<tt>rcu_dereference_bh_check()</tt>,
-<tt>synchronize_rcu_bh()</tt>,
-<tt>synchronize_rcu_bh_expedited()</tt>,
-<tt>call_rcu_bh()</tt>,
-<tt>rcu_barrier_bh()</tt>, and
-<tt>rcu_read_lock_bh_held()</tt>.
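-
-<p>
-A reader/updater pair using this API might look as follows, with
-hypothetical names throughout:
-
-<blockquote>
-<pre>
-static void reader(void)  /* typically runs in softirq context */
-{
-  struct foo *p;
-
-  rcu_read_lock_bh();
-  p = rcu_dereference_bh(gp_bh);
-  if (p)
-    do_something_with(p);
-  rcu_read_unlock_bh();
-}
-
-static void updater(struct foo *newp)
-{
-  struct foo *old;
-
-  spin_lock(&amp;gp_bh_lock);
-  old = rcu_dereference_protected(gp_bh, lockdep_is_held(&amp;gp_bh_lock));
-  rcu_assign_pointer(gp_bh, newp);
-  spin_unlock(&amp;gp_bh_lock);
-  synchronize_rcu_bh();  /* wait for pre-existing _bh readers */
-  kfree(old);
-}
-</pre>
-</blockquote>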
-
-<h3><a name="Sched Flavor">Sched Flavor</a></h3>
-
-<p>
-Before preemptible RCU, waiting for an RCU grace period had the
-side effect of also waiting for all pre-existing interrupt
-and NMI handlers.
-However, there are legitimate preemptible-RCU implementations that
-do not have this property, given that any point in the code outside
-of an RCU read-side critical section can be a quiescent state.
-Therefore, <i>RCU-sched</i> was created, which follows &ldquo;classic&rdquo;
-RCU in that an RCU-sched grace period waits for pre-existing
-interrupt and NMI handlers.
-In kernels built with <tt>CONFIG_PREEMPT=n</tt>, the RCU and RCU-sched
-APIs have identical implementations, while kernels built with
-<tt>CONFIG_PREEMPT=y</tt> provide a separate implementation for each.
-
-<p>
-Note well that in <tt>CONFIG_PREEMPT=y</tt> kernels,
-<tt>rcu_read_lock_sched()</tt> and <tt>rcu_read_unlock_sched()</tt>
-disable and re-enable preemption, respectively.
-This means that if there was a preemption attempt during the
-RCU-sched read-side critical section, <tt>rcu_read_unlock_sched()</tt>
-will enter the scheduler, with all the latency and overhead entailed.
-Just as with <tt>rcu_read_unlock_bh()</tt>, this can make it look
-as if <tt>rcu_read_unlock_sched()</tt> was executing very slowly.
-However, the highest-priority task won't be preempted, so that task
-will enjoy low-overhead <tt>rcu_read_unlock_sched()</tt> invocations.
-
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">RCU-sched API</a>
-includes
-<tt>rcu_read_lock_sched()</tt>,
-<tt>rcu_read_unlock_sched()</tt>,
-<tt>rcu_read_lock_sched_notrace()</tt>,
-<tt>rcu_read_unlock_sched_notrace()</tt>,
-<tt>rcu_dereference_sched()</tt>,
-<tt>rcu_dereference_sched_check()</tt>,
-<tt>synchronize_sched()</tt>,
-<tt>synchronize_rcu_sched_expedited()</tt>,
-<tt>call_rcu_sched()</tt>,
-<tt>rcu_barrier_sched()</tt>, and
-<tt>rcu_read_lock_sched_held()</tt>.
-However, anything that disables preemption also marks an RCU-sched
-read-side critical section, including
-<tt>preempt_disable()</tt> and <tt>preempt_enable()</tt>,
-<tt>local_irq_save()</tt> and <tt>local_irq_restore()</tt>,
-and so on.
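-
-<p>
-For example, the following two illustrative readers (with
-<tt>gp_sched</tt> being a hypothetical RCU-sched-protected pointer
-and <tt>do_something_with()</tt> a hypothetical helper) are
-equivalent from RCU-sched's point of view:
-
-<blockquote>
-<pre>
-/* Explicitly marked RCU-sched reader. */
-rcu_read_lock_sched();
-p = rcu_dereference_sched(gp_sched);
-do_something_with(p);
-rcu_read_unlock_sched();
-
-/* Implicit RCU-sched reader: preemption disabled by other means. */
-preempt_disable();
-p = rcu_dereference_sched(gp_sched);
-do_something_with(p);
-preempt_enable();
-</pre>
-</blockquote>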
-
-<h3><a name="Sleepable RCU">Sleepable RCU</a></h3>
-
-<p>
-For well over a decade, someone saying &ldquo;I need to block within
-an RCU read-side critical section&rdquo; was a reliable indication
-that this someone did not understand RCU.
-After all, if you are always blocking in an RCU read-side critical
-section, you can probably afford to use a higher-overhead synchronization
-mechanism.
-However, that changed with the advent of the Linux kernel's notifiers,
-whose RCU read-side critical
-sections almost never sleep, but sometimes need to.
-This resulted in the introduction of
-<a href="https://lwn.net/Articles/202847/">sleepable RCU</a>,
-or <i>SRCU</i>.
-
-<p>
-SRCU allows different domains to be defined, with each such domain
-defined by an instance of an <tt>srcu_struct</tt> structure.
-A pointer to this structure must be passed in to each SRCU function,
-for example, <tt>synchronize_srcu(&amp;ss)</tt>, where
-<tt>ss</tt> is the <tt>srcu_struct</tt> structure.
-The key benefit of these domains is that a slow SRCU reader in one
-domain does not delay an SRCU grace period in some other domain.
-That said, one consequence of these domains is that read-side code
-must pass a &ldquo;cookie&rdquo; from <tt>srcu_read_lock()</tt>
-to <tt>srcu_read_unlock()</tt>, for example, as follows:
-
-<blockquote>
-<pre>
- 1 int idx;
- 2
- 3 idx = srcu_read_lock(&amp;ss);
- 4 do_something();
- 5 srcu_read_unlock(&amp;ss, idx);
-</pre>
-</blockquote>
-
-<p>
-As noted above, it is legal to block within SRCU read-side critical sections;
-however, with great power comes great responsibility.
-If you block forever in one of a given domain's SRCU read-side critical
-sections, then that domain's grace periods will also be blocked forever.
-Of course, one good way to block forever is to deadlock, which can
-happen if any operation in a given domain's SRCU read-side critical
-section can block waiting, either directly or indirectly, for that domain's
-grace period to elapse.
-For example, this results in a self-deadlock:
-
-<blockquote>
-<pre>
- 1 int idx;
- 2
- 3 idx = srcu_read_lock(&amp;ss);
- 4 do_something();
- 5 synchronize_srcu(&amp;ss);
- 6 srcu_read_unlock(&amp;ss, idx);
-</pre>
-</blockquote>
-
-<p>
-However, if line&nbsp;5 acquired a mutex that was held across
-a <tt>synchronize_srcu()</tt> for domain <tt>ss</tt>,
-deadlock would still be possible.
-Furthermore, if line&nbsp;5 acquired a mutex that was held across
-a <tt>synchronize_srcu()</tt> for some other domain <tt>ss1</tt>,
-and if an <tt>ss1</tt>-domain SRCU read-side critical section
-acquired another mutex that was held across an <tt>ss</tt>-domain
-<tt>synchronize_srcu()</tt>,
-deadlock would again be possible.
-Such a deadlock cycle could extend across an arbitrarily large number
-of different SRCU domains.
-Again, with great power comes great responsibility.
-
-<p>
-Unlike the other RCU flavors, SRCU read-side critical sections can
-run on idle and even offline CPUs.
-This ability requires that <tt>srcu_read_lock()</tt> and
-<tt>srcu_read_unlock()</tt> contain memory barriers, which means
-that SRCU readers will run a bit slower than would RCU readers.
-It also motivates the <tt>smp_mb__after_srcu_read_unlock()</tt>
-API, which, in combination with <tt>srcu_read_unlock()</tt>,
-guarantees a full memory barrier.
-
-<p>
-The
-<a href="https://lwn.net/Articles/609973/#RCU Per-Flavor API Table">SRCU API</a>
-includes
-<tt>srcu_read_lock()</tt>,
-<tt>srcu_read_unlock()</tt>,
-<tt>srcu_dereference()</tt>,
-<tt>srcu_dereference_check()</tt>,
-<tt>synchronize_srcu()</tt>,
-<tt>synchronize_srcu_expedited()</tt>,
-<tt>call_srcu()</tt>,
-<tt>srcu_barrier()</tt>, and
-<tt>srcu_read_lock_held()</tt>.
-It also includes
-<tt>DEFINE_SRCU()</tt>,
-<tt>DEFINE_STATIC_SRCU()</tt>, and
-<tt>init_srcu_struct()</tt>
-APIs for defining and initializing <tt>srcu_struct</tt> structures.
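-
-<p>
-Putting these pieces together, a self-contained SRCU usage sketch
-(hypothetical names throughout) might look like this:
-
-<blockquote>
-<pre>
-DEFINE_STATIC_SRCU(my_srcu);   /* defines and initializes the domain */
-static struct foo __rcu *gp_srcu;
-static DEFINE_SPINLOCK(gp_srcu_lock);
-
-static void reader(void)  /* may block, but see the caveats above */
-{
-  struct foo *p;
-  int idx;
-
-  idx = srcu_read_lock(&amp;my_srcu);
-  p = srcu_dereference(gp_srcu, &amp;my_srcu);
-  if (p)
-    do_something_that_might_sleep(p);
-  srcu_read_unlock(&amp;my_srcu, idx);
-}
-
-static void updater(struct foo *newp)
-{
-  struct foo *old;
-
-  spin_lock(&amp;gp_srcu_lock);
-  old = rcu_dereference_protected(gp_srcu, lockdep_is_held(&amp;gp_srcu_lock));
-  rcu_assign_pointer(gp_srcu, newp);
-  spin_unlock(&amp;gp_srcu_lock);
-  synchronize_srcu(&amp;my_srcu);   /* wait for pre-existing SRCU readers */
-  kfree(old);
-}
-</pre>
-</blockquote>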
-
-<h3><a name="Tasks RCU">Tasks RCU</a></h3>
-
-<p>
-Some forms of tracing use &ldquo;trampolines&rdquo; to handle the
-binary rewriting required to install different types of probes.
-It would be good to be able to free old trampolines, which sounds
-like a job for some form of RCU.
-However, because it is necessary to be able to install a trace
-anywhere in the code, it is not possible to use read-side markers
-such as <tt>rcu_read_lock()</tt> and <tt>rcu_read_unlock()</tt>.
-In addition, it does not work to have these markers in the trampoline
-itself, because there would need to be instructions following
-<tt>rcu_read_unlock()</tt>.
-Although <tt>synchronize_rcu()</tt> would guarantee that execution
-reached the <tt>rcu_read_unlock()</tt>, it would not be able to
-guarantee that execution had completely left the trampoline.
-
-<p>
-The solution, in the form of
-<a href="https://lwn.net/Articles/607117/"><i>Tasks RCU</i></a>,
-is to have implicit
-read-side critical sections that are delimited by voluntary context
-switches, that is, calls to <tt>schedule()</tt>,
-<tt>cond_resched_rcu_qs()</tt>, and
-<tt>synchronize_rcu_tasks()</tt>.
-In addition, transitions to and from userspace execution also delimit
-tasks-RCU read-side critical sections.
-
-<p>
-The tasks-RCU API is quite compact, consisting only of
-<tt>call_rcu_tasks()</tt>,
-<tt>synchronize_rcu_tasks()</tt>, and
-<tt>rcu_barrier_tasks()</tt>.
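-
-<p>
-For example, a tracing subsystem might retire an old trampoline roughly
-as follows; this is only a sketch, and the real tracing code is
-considerably more involved:
-
-<blockquote>
-<pre>
-static void free_old_trampoline(struct rcu_head *rhp)
-{
-  struct tramp *tp = container_of(rhp, struct tramp, rh);
-
-  free_trampoline_memory(tp);  /* hypothetical */
-}
-
-static void retire_trampoline(struct tramp *tp)
-{
-  unhook_trampoline(tp);       /* hypothetical: no new entries */
-
-  /* Wait for all tasks to pass through a voluntary context switch. */
-  call_rcu_tasks(&amp;tp-&gt;rh, free_old_trampoline);
-}
-</pre>
-</blockquote>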
-
-<h2><a name="Possible Future Changes">Possible Future Changes</a></h2>
-
-<p>
-One of the tricks that RCU uses to attain update-side scalability is
-to increase grace-period latency with increasing numbers of CPUs.
-If this becomes a serious problem, it will be necessary to rework the
-grace-period state machine so as to avoid the need for the additional
-latency.
-
-<p>
-Expedited grace periods scan the CPUs, so their latency and overhead
-increase with increasing numbers of CPUs.
-If this becomes a serious problem on large systems, it will be necessary
-to do some redesign to avoid this scalability problem.
-
-<p>
-RCU disables CPU hotplug in a few places, perhaps most notably in the
-expedited grace-period and <tt>rcu_barrier()</tt> operations.
-If there is a strong reason to use expedited grace periods in CPU-hotplug
-notifiers, it will be necessary to avoid disabling CPU hotplug.
-This would introduce some complexity, so there had better be a <i>very</i>
-good reason.
-
-<p>
-The tradeoff between grace-period latency on the one hand and interruptions
-of other CPUs on the other hand may need to be re-examined.
-The desire is of course for zero grace-period latency as well as zero
-interprocessor interrupts undertaken during an expedited grace period
-operation.
-While this ideal is unlikely to be achievable, it is quite possible that
-further improvements can be made.
-
-<p>
-The multiprocessor implementations of RCU use a combining tree that
-groups CPUs so as to reduce lock contention and increase cache locality.
-However, this combining tree does not spread its memory across NUMA
-nodes nor does it align the CPU groups with hardware features such
-as sockets or cores.
-Such spreading and alignment is currently believed to be unnecessary
-because the hotpath read-side primitives do not access the combining
-tree, nor does <tt>call_rcu()</tt> in the common case.
-If you believe that your architecture needs such spreading and alignment,
-then your architecture should also benefit from the
-<tt>rcutree.rcu_fanout_leaf</tt> boot parameter, which can be set
-to the number of CPUs in a socket, NUMA node, or whatever.
-If the number of CPUs is too large, use a fraction of the number of
-CPUs.
-If the number of CPUs is a large prime number, well, that certainly
-is an &ldquo;interesting&rdquo; architectural choice!
-More flexible arrangements might be considered, but only if
-<tt>rcutree.rcu_fanout_leaf</tt> has proven inadequate, and only
-if the inadequacy has been demonstrated by a carefully run and
-realistic system-level workload.
-
-<p>
-Please note that arrangements that require RCU to remap CPU numbers will
-require extremely good demonstration of need and full exploration of
-alternatives.
-
-<p>
-There is an embarrassingly large number of flavors of RCU, and this
-number has been increasing over time.
-Perhaps it will be possible to combine some at some future date.
-
-<p>
-RCU's various kthreads are reasonably recent additions.
-It is quite likely that adjustments will be required to more gracefully
-handle extreme loads.
-It might also be necessary to be able to relate CPU utilization by
-RCU's kthreads and softirq handlers to the code that instigated this
-CPU utilization.
-For example, RCU callback overhead might be charged back to the
-originating <tt>call_rcu()</tt> instance, though probably not
-in production kernels.
-
-<h2><a name="Summary">Summary</a></h2>
-
-<p>
-This document has presented more than two decades' worth of RCU
-requirements.
-Given that the requirements keep changing, this will not be the last
-word on this subject, but at least it serves to get an important
-subset of the requirements set forth.
-
-<h2><a name="Acknowledgments">Acknowledgments</a></h2>
-
-I am grateful to Steven Rostedt, Lai Jiangshan, Ingo Molnar,
-Oleg Nesterov, Borislav Petkov, Peter Zijlstra, Boqun Feng, and
-Andy Lutomirski for their help in rendering
-this article human readable, and to Michelle Rankin for her support
-of this effort.
-Other contributions are acknowledged in the Linux kernel's git archive.
-The cartoon is copyright (c) 2013 by Melissa Broussard,
-and is provided
-under the terms of the Creative Commons Attribution-Share Alike 3.0
-United States license.
-
-<p>@@QQAL@@
-
-</body></html>
diff --git a/Documentation/RCU/Design/htmlqqz.sh b/Documentation/RCU/Design/htmlqqz.sh
deleted file mode 100755 (executable)
index d354f06..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/sh
-#
-# Usage: sh htmlqqz.sh file
-#
-# Extracts and converts quick quizzes in a proto-HTML document file.htmlx.
-# Commands, all of which must be on a line by themselves:
-#
-#      "<p>@@QQ@@": Start of a quick quiz.
-#      "<p>@@QQA@@": Start of a quick-quiz answer.
-#      "<p>@@QQE@@": End of a quick-quiz answer, and thus of the quick quiz.
-#      "<p>@@QQAL@@": Place to put quick-quiz answer list.
-#
-# Places the result in file.html.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, you can access it online at
-# http://www.gnu.org/licenses/gpl-2.0.html.
-#
-# Copyright (c) 2013 Paul E. McKenney, IBM Corporation.
-
-fn=$1
-if test ! -r $fn.htmlx
-then
-       echo "Error: $fn.htmlx unreadable."
-       exit 1
-fi
-
-echo "<!-- DO NOT HAND EDIT. -->" > $fn.html
-echo "<!-- Instead, edit $fn.htmlx and run 'sh htmlqqz.sh $fn' -->" >> $fn.html
-awk < $fn.htmlx >> $fn.html '
-
-state == "" && $1 != "<p>@@QQ@@" && $1 != "<p>@@QQAL@@" {
-       print $0;
-       if ($0 ~ /^<p>@@QQ/)
-               print "Bad Quick Quiz command: " NR " (expected <p>@@QQ@@ or <p>@@QQAL@@)." > "/dev/stderr"
-       next;
-}
-
-state == "" && $1 == "<p>@@QQ@@" {
-       qqn++;
-       qqlineno = NR;
-       haveqq = 1;
-       state = "qq";
-       print "<p><a name=\"Quick Quiz " qqn "\"><b>Quick Quiz " qqn "</b>:</a>"
-       next;
-}
-
-state == "qq" && $1 != "<p>@@QQA@@" {
-       qq[qqn] = qq[qqn] $0 "\n";
-       print $0
-       if ($0 ~ /^<p>@@QQ/)
-               print "Bad Quick Quiz command: " NR ". (expected <p>@@QQA@@)" > "/dev/stderr"
-       next;
-}
-
-state == "qq" && $1 == "<p>@@QQA@@" {
-       state = "qqa";
-       print "<br><a href=\"#qq" qqn "answer\">Answer</a>"
-       next;
-}
-
-state == "qqa" && $1 != "<p>@@QQE@@" {
-       qqa[qqn] = qqa[qqn] $0 "\n";
-       if ($0 ~ /^<p>@@QQ/)
-               print "Bad Quick Quiz command: " NR " (expected <p>@@QQE@@)." > "/dev/stderr"
-       next;
-}
-
-state == "qqa" && $1 == "<p>@@QQE@@" {
-       state = "";
-       next;
-}
-
-state == "" && $1 == "<p>@@QQAL@@" {
-       haveqq = "";
-       print "<h3><a name=\"Answers to Quick Quizzes\">"
-       print "Answers to Quick Quizzes</a></h3>"
-       print "";
-       for (i = 1; i <= qqn; i++) {
-               print "<a name=\"qq" i "answer\"></a>"
-               print "<p><b>Quick Quiz " i "</b>:"
-               print qq[i];
-               print "";
-               print "</p><p><b>Answer</b>:"
-               print qqa[i];
-               print "";
-               print "</p><p><a href=\"#Quick%20Quiz%20" i "\"><b>Back to Quick Quiz " i "</b>.</a>"
-               print "";
-       }
-       next;
-}
-
-END {
-       if (state != "")
-               print "Unterminated Quick Quiz: " qqlineno "." > "/dev/stderr"
-       else if (haveqq)
-               print "Missing \"<p>@@QQAL@@\", no Quick Quiz." > "/dev/stderr"
-}'
index ec6998b1b6d04f3139ed6c066537cc059c89838d..00a3a38b375ae9946425fc2ea94fa0c2383e867c 100644 (file)
@@ -237,17 +237,17 @@ o "ktl" is the low-order 16 bits (in hexadecimal) of the count of
 
 The output of "cat rcu/rcu_preempt/rcuexp" looks as follows:
 
-s=21872 wd0=0 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
+s=21872 wd1=0 wd2=0 wd3=5 n=0 enq=0 sc=21872
 
 These fields are as follows:
 
 o      "s" is the sequence number, with an odd number indicating that
        an expedited grace period is in progress.
 
-o      "wd0", "wd1", "wd2", and "wd3" are the number of times that an
-       attempt to start an expedited grace period found that someone
-       else had completed an expedited grace period that satisfies the
-       attempted request.  "Our work is done."
+o      "wd1", "wd2", and "wd3" are the number of times that an attempt
+       to start an expedited grace period found that someone else had
+       completed an expedited grace period that satisfies the attempted
+       request.  "Our work is done."
 
 o      "n" is number of times that a concurrent CPU-hotplug operation
        forced a fallback to a normal grace period.
index dc49c6712b17ff4968d3c4fdf2b304e0292fb5be..111770ffa10e7cc4c3d2dada09e1e85f8d576099 100644 (file)
@@ -681,22 +681,30 @@ Although RCU can be used in many different ways, a very common use of
 RCU is analogous to reader-writer locking.  The following unified
 diff shows how closely related RCU and reader-writer locking can be.
 
+       @@ -5,5 +5,5 @@ struct el {
+               int data;
+               /* Other data fields */
+        };
+       -rwlock_t listmutex;
+       +spinlock_t listmutex;
+        struct el head;
+
        @@ -13,15 +14,15 @@
                struct list_head *lp;
                struct el *p;
 
-       -       read_lock();
+       -       read_lock(&listmutex);
        -       list_for_each_entry(p, head, lp) {
        +       rcu_read_lock();
        +       list_for_each_entry_rcu(p, head, lp) {
                        if (p->key == key) {
                                *result = p->data;
-       -                       read_unlock();
+       -                       read_unlock(&listmutex);
        +                       rcu_read_unlock();
                                return 1;
                        }
                }
-       -       read_unlock();
+       -       read_unlock(&listmutex);
        +       rcu_read_unlock();
                return 0;
         }
@@ -732,7 +740,7 @@ Or, for those who prefer a side-by-side listing:
  5   int data;                          5   int data;
  6   /* Other data fields */            6   /* Other data fields */
  7 };                                   7 };
- 8 spinlock_t listmutex;                8 spinlock_t listmutex;
+ 8 rwlock_t listmutex;                  8 spinlock_t listmutex;
  9 struct el head;                      9 struct el head;
 
  1 int search(long key, int *result)    1 int search(long key, int *result)
@@ -740,15 +748,15 @@ Or, for those who prefer a side-by-side listing:
  3   struct list_head *lp;              3   struct list_head *lp;
  4   struct el *p;                      4   struct el *p;
  5                                      5
- 6   read_lock();                       6   rcu_read_lock();
+ 6   read_lock(&listmutex);             6   rcu_read_lock();
  7   list_for_each_entry(p, head, lp) { 7   list_for_each_entry_rcu(p, head, lp) {
  8     if (p->key == key) {             8     if (p->key == key) {
  9       *result = p->data;             9       *result = p->data;
-10       read_unlock();                10       rcu_read_unlock();
+10       read_unlock(&listmutex);      10       rcu_read_unlock();
 11       return 1;                     11       return 1;
 12     }                               12     }
 13   }                                 13   }
-14   read_unlock();                    14   rcu_read_unlock();
+14   read_unlock(&listmutex);          14   rcu_read_unlock();
 15   return 0;                         15   return 0;
 16 }                                   16 }
 
index 30df832a6f2f513f4e23841dd0a0ad888cc13dba..87adfb227ca92facd1b3dfe75501b21e9a610a52 100644 (file)
@@ -32,6 +32,10 @@ Optional properties:
 - target-supply     : regulator for SATA target power
 - phys              : reference to the SATA PHY node
 - phy-names         : must be "sata-phy"
+- ports-implemented : Mask that indicates which of the ports supported by
+                     the HBA are available for software to use. Useful if
+                     PORTS_IMPL is not programmed by the BIOS, which is true
+                     with some embedded SoCs.
 
 Required properties when using sub-nodes:
 - #address-cells    : number of cells to encode an address
index 28a4781ab6d7b9d6a1ab553ed96857f0f509e250..0ae06491b4302209607340a5f4b250c7ca1fe100 100644 (file)
@@ -45,13 +45,13 @@ Required properties:
 Optional properties:
 - dual_emac_res_vlan   : Specifies VID to be used to segregate the ports
 - mac-address          : See ethernet.txt file in the same directory
-- phy_id               : Specifies slave phy id
+- phy_id               : Specifies slave phy id (deprecated, use phy-handle)
 - phy-handle           : See ethernet.txt file in the same directory
 
 Slave sub-nodes:
 - fixed-link           : See fixed-link.txt file in the same directory
-                         Either the property phy_id, or the sub-node
-                         fixed-link can be specified
+
+Note: Exactly one of phy_id, phy-handle, or fixed-link must be specified.
 
 Note: "ti,hwmods" field is used to fetch the base address and irq
 resources from TI, omap hwmod data base during device registration.
index e98a9652ccc8c4d3a2263fe5a67b9064b27d1f04..0127be360fe852d70a853026c03de1fcc8c8a7a0 100644 (file)
@@ -1,50 +1,29 @@
-Device-Tree binding for regmap
-
-The endianness mode of CPU & Device scenarios:
-Index     Device     Endianness properties
----------------------------------------------------
-1         BE         'big-endian'
-2         LE         'little-endian'
-3        Native     'native-endian'
-
-For one device driver, which will run in different scenarios above
-on different SoCs using the devicetree, we need one way to simplify
-this.
+Devicetree binding for regmap
 
 Optional properties:
-- {big,little,native}-endian: these are boolean properties, if absent
-  then the implementation will choose a default based on the device
-  being controlled.  These properties are for register values and all
-  the buffers only.  Native endian means that the CPU and device have
-  the same endianness.
 
-Examples:
-Scenario 1 : CPU in LE mode & device in LE mode.
-dev: dev@40031000 {
-             compatible = "name";
-             reg = <0x40031000 0x1000>;
-             ...
-};
+   little-endian,
+   big-endian,
+   native-endian:      See common-properties.txt for a definition
 
-Scenario 2 : CPU in LE mode & device in BE mode.
-dev: dev@40031000 {
-             compatible = "name";
-             reg = <0x40031000 0x1000>;
-             ...
-             big-endian;
-};
+Note:
+Regmap defaults to little-endian register access on MMIO-based
+devices, as this is by far the most common setting. On CPU
+architectures that typically run big-endian operating systems
+(e.g. PowerPC), registers can be defined as big-endian and must
+be marked that way in the devicetree.
 
-Scenario 3 : CPU in BE mode & device in BE mode.
-dev: dev@40031000 {
-             compatible = "name";
-             reg = <0x40031000 0x1000>;
-             ...
-};
+On SoCs that can be operated in both big-endian and little-endian
+modes, with a single hardware switch controlling both the endianness
+of the CPU and a byteswap for MMIO registers (e.g. many Broadcom MIPS
+chips), "native-endian" is used to allow using the same device tree
+blob in both cases.
 
-Scenario 4 : CPU in BE mode & device in LE mode.
+Examples:
+Scenario 1 : a register set in big-endian mode.
 dev: dev@40031000 {
-             compatible = "name";
+             compatible = "syscon";
              reg = <0x40031000 0x1000>;
+             big-endian;
              ...
-             little-endian;
 };
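
As a driver-side illustration (not part of this patch; names are
hypothetical), these properties take effect when the regmap_config leaves
the value endianness at its default, so the regmap core can consult the
devicetree:

	#include <linux/err.h>
	#include <linux/regmap.h>

	static const struct regmap_config example_regmap_cfg = {
		.reg_bits		= 32,
		.val_bits		= 32,
		.reg_stride		= 4,
		/* DEFAULT lets the core pick big/little/native-endian
		 * from the properties documented above. */
		.val_format_endian	= REGMAP_ENDIAN_DEFAULT,
	};

	static int example_probe(struct device *dev, void __iomem *base)
	{
		struct regmap *map;

		map = devm_regmap_init_mmio(dev, base, &example_regmap_cfg);
		return PTR_ERR_OR_ZERO(map);
	}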
index 0b3de80ec8f69c1aef9dfaa59fb86c14395ba43e..49673bd30b87fd4638f5b6b570d759320a817e81 100644 (file)
@@ -3284,6 +3284,44 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Lazy RCU callbacks are those which RCU can
                        prove do nothing more than free memory.
 
+       rcuperf.gp_exp= [KNL]
+                       Measure performance of expedited synchronous
+                       grace-period primitives.
+
+       rcuperf.holdoff= [KNL]
+                       Set test-start holdoff period.  The purpose of
+                       this parameter is to delay the start of the
+                       test until boot completes in order to avoid
+                       interference.
+
+       rcuperf.nreaders= [KNL]
+                       Set number of RCU readers.  The value -1 selects
+                       N, where N is the number of CPUs.  A value
+                       "n" less than -1 selects N-n+1, where N is again
+                       the number of CPUs.  For example, -2 selects N
+                       (the number of CPUs), -3 selects N+1, and so on.
+                       A value of "n" less than or equal to -N selects
+                       a single reader.
+
+       rcuperf.nwriters= [KNL]
+                       Set number of RCU writers.  The values operate
+                       the same as for rcuperf.nreaders; for example, -1
+                       selects N, where N is the number of CPUs.
+
+       rcuperf.perf_runnable= [BOOT]
+                       Start rcuperf running at boot time.
+
+       rcuperf.shutdown= [KNL]
+                       Shut the system down after performance tests
+                       complete.  This is useful for hands-off automated
+                       testing.
+
+       rcuperf.perf_type= [KNL]
+                       Specify the RCU implementation to test.
+
+       rcuperf.verbose= [KNL]
+                       Enable additional printk() statements.
+
        rcutorture.cbflood_inter_holdoff= [KNL]
                        Set holdoff time (jiffies) between successive
                        callback-flood tests.
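
As an illustration (not part of this patch; values are examples only), the
rcuperf options above are combined on the kernel command line in the usual
dotted module.parameter form, e.g.:

	rcuperf.perf_type=rcu rcuperf.nreaders=-1 rcuperf.nwriters=-1 rcuperf.shutdown=1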
index 3f24df8c6e6557cf1cf0d9e25857dead9cd0d94d..50b8589d12fd167371c87ca457708146bb765ae3 100644 (file)
@@ -6,7 +6,7 @@ This is the driver for the Altera Triple-Speed Ethernet (TSE) controllers
 using the SGDMA and MSGDMA soft DMA IP components. The driver uses the
 platform bus to obtain component resources. The designs used to test this
 driver were built for a Cyclone(R) V SOC FPGA board, a Cyclone(R) V FPGA board,
-and tested with ARM and NIOS processor hosts seperately. The anticipated use
+and tested with ARM and NIOS processor hosts separately. The anticipated use
 cases are simple communications between an embedded system and an external peer
 for status and simple configuration of the embedded system.
 
@@ -65,14 +65,14 @@ Driver parameters can be also passed in command line by using:
 4.1) Transmit process
 When the driver's transmit routine is called by the kernel, it sets up a
 transmit descriptor by calling the underlying DMA transmit routine (SGDMA or
-MSGDMA), and initites a transmit operation. Once the transmit is complete, an
+MSGDMA), and initiates a transmit operation. Once the transmit is complete, an
 interrupt is driven by the transmit DMA logic. The driver handles the transmit
 completion in the context of the interrupt handling chain by recycling
 resource required to send and track the requested transmit operation.
 
 4.2) Receive process
 The driver will post receive buffers to the receive DMA logic during driver
-intialization. Receive buffers may or may not be queued depending upon the
+initialization. Receive buffers may or may not be queued depending upon the
 underlying DMA logic (MSGDMA is able queue receive buffers, SGDMA is not able
 to queue receive buffers to the SGDMA receive logic). When a packet is
 received, the DMA logic generates an interrupt. The driver handles a receive
index de2a327766a7ed2285b65cf4023c214205f47e70..56e36861245f108595bb340123b2dece1ea2bead 100644 (file)
@@ -69,18 +69,18 @@ LCO: Local Checksum Offload
 LCO is a technique for efficiently computing the outer checksum of an
  encapsulated datagram when the inner checksum is due to be offloaded.
 The ones-complement sum of a correctly checksummed TCP or UDP packet is
- equal to the sum of the pseudo header, because everything else gets
- 'cancelled out' by the checksum field.  This is because the sum was
+ equal to the complement of the sum of the pseudo header, because everything
+ else gets 'cancelled out' by the checksum field.  This is because the sum was
  complemented before being written to the checksum field.
 More generally, this holds in any case where the 'IP-style' ones complement
  checksum is used, and thus any checksum that TX Checksum Offload supports.
 That is, if we have set up TX Checksum Offload with a start/offset pair, we
- know that _after the device has filled in that checksum_, the ones
+ know that after the device has filled in that checksum, the ones
  complement sum from csum_start to the end of the packet will be equal to
- _whatever value we put in the checksum field beforehand_.  This allows us
- to compute the outer checksum without looking at the payload: we simply
- stop summing when we get to csum_start, then add the 16-bit word at
- (csum_start + csum_offset).
+ the complement of whatever value we put in the checksum field beforehand.
+ This allows us to compute the outer checksum without looking at the payload:
+ we simply stop summing when we get to csum_start, then add the complement of
+ the 16-bit word at (csum_start + csum_offset).
 Then, when the true inner checksum is filled in (either by hardware or by
  skb_checksum_help()), the outer checksum will become correct by virtue of
  the arithmetic.
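
The rule above ("stop summing at csum_start, then add the complement of the
16-bit word at csum_start + csum_offset") is small enough to sketch outside
the kernel.  An illustrative userspace C fragment, with assumed helper names
and an even csum_start, not kernel code:

	#include <stdint.h>
	#include <stddef.h>

	/* 16-bit ones-complement addition with end-around carry. */
	static uint16_t csum16_add(uint16_t a, uint16_t b)
	{
		uint32_t sum = (uint32_t)a + b;

		return (uint16_t)((sum & 0xffff) + (sum >> 16));
	}

	/* Outer sum per LCO: cover only the bytes before csum_start, then
	 * fold in the complement of the inner checksum field located at
	 * csum_start + csum_offset. */
	static uint16_t lco_outer_sum(const uint8_t *buf, size_t csum_start,
				      size_t csum_offset)
	{
		uint16_t sum = 0;
		size_t i;

		for (i = 0; i + 1 < csum_start; i += 2)
			sum = csum16_add(sum, (uint16_t)((buf[i] << 8) | buf[i + 1]));

		sum = csum16_add(sum, (uint16_t)~((buf[csum_start + csum_offset] << 8) |
						  buf[csum_start + csum_offset + 1]));

		return sum;	/* its complement is what the outer checksum field holds */
	}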
index cf996394e466b708d5ca02757143205d519ceb5b..14422f8fcdc474f5f32cde7868803cf67fbb0805 100644 (file)
@@ -8,7 +8,7 @@ Initial Release:
        This is conceptually very similar to the macvlan driver with one major
 exception of using L3 for mux-ing /demux-ing among slaves. This property makes
 the master device share the L2 with it's slave devices. I have developed this
-driver in conjuntion with network namespaces and not sure if there is use case
+driver in conjunction with network namespaces and not sure if there is use case
 outside of it.
 
 
@@ -42,7 +42,7 @@ out. In this mode the slaves will RX/TX multicast and broadcast (if applicable)
 as well.
 
 4.2 L3 mode:
-       In this mode TX processing upto L3 happens on the stack instance attached
+       In this mode TX processing up to L3 happens on the stack instance attached
 to the slave device and packets are switched to the stack instance of the
 master device for the L2 processing and routing from that instance will be
 used before packets are queued on the outbound device. In this mode the slaves
@@ -56,7 +56,7 @@ situations defines your use case then you can choose to use ipvlan -
        (a) The Linux host that is connected to the external switch / router has
 policy configured that allows only one mac per port.
        (b) No of virtual devices created on a master exceed the mac capacity and
-puts the NIC in promiscous mode and degraded performance is a concern.
+puts the NIC in promiscuous mode and degraded performance is a concern.
        (c) If the slave device is to be put into the hostile / untrusted network
 namespace where L2 on the slave could be changed / misused.
 
index f4be85e9600578e7411f1baa1ab37041a677fe4a..2c4e3354e12891e755de5986b6f9fa82eff0657b 100644 (file)
@@ -67,12 +67,12 @@ The two basic thread commands are:
  * add_device DEVICE@NAME -- adds a single device
  * rem_device_all         -- remove all associated devices
 
-When adding a device to a thread, a corrosponding procfile is created
+When adding a device to a thread, a corresponding procfile is created
 which is used for configuring this device. Thus, device names need to
 be unique.
 
 To support adding the same device to multiple threads, which is useful
-with multi queue NICs, the device naming scheme is extended with "@":
+with multi queue NICs, the device naming scheme is extended with "@":
  device@something
 
 The part after "@" can be anything, but it is custom to use the thread
@@ -221,7 +221,7 @@ Sample scripts
 
 A collection of tutorial scripts and helpers for pktgen is in the
 samples/pktgen directory. The helper parameters.sh file support easy
-and consistant parameter parsing across the sample scripts.
+and consistent parameter parsing across the sample scripts.
 
 Usage example and help:
  ./pktgen_sample01_simple.sh -i eth4 -m 00:1B:21:3C:9D:F8 -d 192.168.8.2
index d52aa10cfe911c88b47927c25cfd8ef596c65986..5da679c573d2326c88cdcd6eccde9805f8dd06dc 100644 (file)
@@ -41,7 +41,7 @@ using an rx_handler which gives the impression that packets flow through
 the VRF device. Similarly on egress routing rules are used to send packets
 to the VRF device driver before getting sent out the actual interface. This
 allows tcpdump on a VRF device to capture all packets into and out of the
-VRF as a whole.[1] Similiarly, netfilter [2] and tc rules can be applied
+VRF as a whole.[1] Similarly, netfilter [2] and tc rules can be applied
 using the VRF device to specify rules that apply to the VRF domain as a whole.
 
 [1] Packets in the forwarded state do not flow through the device, so those
index d7aac9dedeb4266d970e8cc910726a027305a0b3..8d88e0f2ec493be2e519dbaec3e7995f5027a325 100644 (file)
@@ -4,7 +4,7 @@ Krisztian <hidden@balabit.hu> and others and additional patches
 from Jamal <hadi@cyberus.ca>.
 
 The end goal for syncing is to be able to insert attributes + generate
-events so that the an SA can be safely moved from one machine to another
+events so that the SA can be safely moved from one machine to another
 for HA purposes.
 The idea is to synchronize the SA so that the takeover machine can do
 the processing of the SA as accurate as possible if it has access to it.
@@ -13,7 +13,7 @@ We already have the ability to generate SA add/del/upd events.
 These patches add ability to sync and have accurate lifetime byte (to
 ensure proper decay of SAs) and replay counters to avoid replay attacks
 with as minimal loss at failover time.
-This way a backup stays as closely uptodate as an active member.
+This way a backup stays as closely up-to-date as an active member.
 
 Because the above items change for every packet the SA receives,
 it is possible for a lot of the events to be generated.
@@ -163,7 +163,7 @@ If you have an SA that is getting hit by traffic in bursts such that
 there is a period where the timer threshold expires with no packets
 seen, then an odd behavior is seen as follows:
 The first packet arrival after a timer expiry will trigger a timeout
-aevent; i.e we dont wait for a timeout period or a packet threshold
+event; i.e we don't wait for a timeout period or a packet threshold
 to be reached. This is done for simplicity and efficiency reasons.
 
 -JHS
index 57653a44b128c821520b071f0ec4b53bfacc7b96..fcddfd5ded999a3ac8d5624f150439c402e8f0ed 100644 (file)
@@ -645,7 +645,7 @@ allowed to execute.
 perf_event_paranoid:
 
 Controls use of the performance events system by unprivileged
-users (without CAP_SYS_ADMIN).  The default value is 1.
+users (without CAP_SYS_ADMIN).  The default value is 2.
 
  -1: Allow use of (almost) all events by all users
 >=0: Disallow raw tracepoint access by users without CAP_IOC_LOCK
index 42e65d128d015e1aa9c907d424bc2fcc09cfbebf..9c567a431d8d44dc30104b2962366137c29911f2 100644 (file)
@@ -872,9 +872,9 @@ F:  drivers/perf/arm_pmu.c
 F:     include/linux/perf/arm_pmu.h
 
 ARM PORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/
 
@@ -886,35 +886,35 @@ F:        arch/arm/plat-*/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git
 
 ARM PRIMECELL AACI PL041 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     sound/arm/aaci.*
 
 ARM PRIMECELL CLCD PL110 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/video/fbdev/amba-clcd.*
 
 ARM PRIMECELL KMI PL050 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/input/serio/ambakmi.*
 F:     include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/mmc/host/mmci.*
 F:     include/linux/amba/mmci.h
 
 ARM PRIMECELL UART PL010 AND PL011 DRIVERS
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/tty/serial/amba-pl01*.c
 F:     include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 S:     Maintained
 F:     drivers/amba/
 F:     include/linux/amba/bus.h
@@ -1036,7 +1036,7 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 
 ARM/CLKDEV SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/include/asm/clkdev.h
@@ -1093,9 +1093,9 @@ F:        arch/arm/boot/dts/cx92755*
 N:     digicolor
 
 ARM/EBSA110 MACHINE SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/mach-ebsa110/
 F:     drivers/net/ethernet/amd/am79c961a.*
@@ -1124,9 +1124,9 @@ T:        git git://git.berlios.de/gemini-board
 F:     arch/arm/mm/*-fa*
 
 ARM/FOOTBRIDGE ARCHITECTURE
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/include/asm/hardware/dec21285.h
 F:     arch/arm/mach-footbridge/
@@ -1457,7 +1457,7 @@ S:        Maintained
 ARM/PT DIGITAL BOARD PORT
 M:     Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 
 ARM/QUALCOMM SUPPORT
@@ -1493,9 +1493,9 @@ S:        Supported
 F:     arch/arm64/boot/dts/renesas/
 
 ARM/RISCPC ARCHITECTURE
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/include/asm/hardware/entry-macro-iomd.S
 F:     arch/arm/include/asm/hardware/ioc.h
@@ -1773,9 +1773,9 @@ F:        drivers/clk/versatile/clk-vexpress-osc.c
 F:     drivers/clocksource/versatile.c
 
 ARM/VFP SUPPORT
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     arch/arm/vfp/
 
@@ -2921,7 +2921,7 @@ F:        mm/cleancache.c
 F:     include/linux/cleancache.h
 
 CLK API
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-clk@vger.kernel.org
 S:     Maintained
 F:     include/linux/clk.h
@@ -3354,9 +3354,9 @@ S:        Supported
 F:     drivers/net/ethernet/stmicro/stmmac/
 
 CYBERPRO FB DRIVER
-M:     Russell King <linux@arm.linux.org.uk>
+M:     Russell King <linux@armlinux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://www.arm.linux.org.uk/
+W:     http://www.armlinux.org.uk/
 S:     Maintained
 F:     drivers/video/fbdev/cyber2000fb.*
 
@@ -3881,7 +3881,7 @@ F:        Documentation/devicetree/bindings/display/st,stih4xx.txt
 
 DRM DRIVERS FOR VIVANTE GPU IP
 M:     Lucas Stach <l.stach@pengutronix.de>
-R:     Russell King <linux+etnaviv@arm.linux.org.uk>
+R:     Russell King <linux+etnaviv@armlinux.org.uk>
 R:     Christian Gmeiner <christian.gmeiner@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
@@ -4223,8 +4223,8 @@ F:        Documentation/efi-stub.txt
 F:     arch/ia64/kernel/efi.c
 F:     arch/x86/boot/compressed/eboot.[ch]
 F:     arch/x86/include/asm/efi.h
-F:     arch/x86/platform/efi/*
-F:     drivers/firmware/efi/*
+F:     arch/x86/platform/efi/
+F:     drivers/firmware/efi/
 F:     include/linux/efi*.h
 
 EFI VARIABLE FILESYSTEM
@@ -4744,7 +4744,7 @@ F:        drivers/platform/x86/fujitsu-tablet.c
 
 FUSE: FILESYSTEM IN USERSPACE
 M:     Miklos Szeredi <miklos@szeredi.hu>
-L:     fuse-devel@lists.sourceforge.net
+L:     linux-fsdevel@vger.kernel.org
 W:     http://fuse.sourceforge.net/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/fuse.git
 S:     Maintained
@@ -4903,7 +4903,7 @@ F:        net/ipv4/gre_offload.c
 F:     include/net/gre.h
 
 GRETH 10/100/1G Ethernet MAC device driver
-M:     Kristoffer Glembo <kristoffer@gaisler.com>
+M:     Andreas Larsson <andreas@gaisler.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/aeroflex/
@@ -6905,7 +6905,7 @@ L:        linux-man@vger.kernel.org
 S:     Maintained
 
 MARVELL ARMADA DRM SUPPORT
-M:     Russell King <rmk+kernel@arm.linux.org.uk>
+M:     Russell King <rmk+kernel@armlinux.org.uk>
 S:     Maintained
 F:     drivers/gpu/drm/armada/
 
@@ -7905,7 +7905,7 @@ S:        Supported
 F:     drivers/nfc/nxp-nci
 
 NXP TDA998X DRM DRIVER
-M:     Russell King <rmk+kernel@arm.linux.org.uk>
+M:     Russell King <rmk+kernel@armlinux.org.uk>
 S:     Supported
 F:     drivers/gpu/drm/i2c/tda998x_drv.c
 F:     include/drm/i2c/tda998x.h
@@ -7978,7 +7978,7 @@ F:        arch/arm/*omap*/*pm*
 F:     drivers/cpufreq/omap-cpufreq.c
 
 OMAP POWERDOMAIN SOC ADAPTATION LAYER SUPPORT
-M:     Rajendra Nayak <rnayak@ti.com>
+M:     Rajendra Nayak <rnayak@codeaurora.org>
 M:     Paul Walmsley <paul@pwsan.com>
 L:     linux-omap@vger.kernel.org
 S:     Maintained
@@ -10014,7 +10014,8 @@ F:      drivers/infiniband/hw/ocrdma/
 
 SFC NETWORK DRIVER
 M:     Solarflare linux maintainers <linux-net-drivers@solarflare.com>
-M:     Shradha Shah <sshah@solarflare.com>
+M:     Edward Cree <ecree@solarflare.com>
+M:     Bert Kenward <bkenward@solarflare.com>
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/sfc/
@@ -11317,6 +11318,20 @@ F:     include/trace/
 F:     kernel/trace/
 F:     tools/testing/selftests/ftrace/
 
+TRACING MMIO ACCESSES (MMIOTRACE)
+M:     Steven Rostedt <rostedt@goodmis.org>
+M:     Ingo Molnar <mingo@kernel.org>
+R:     Karol Herbst <karolherbst@gmail.com>
+R:     Pekka Paalanen <ppaalanen@gmail.com>
+S:     Maintained
+L:     linux-kernel@vger.kernel.org
+L:     nouveau@lists.freedesktop.org
+F:     kernel/trace/trace_mmiotrace.c
+F:     include/linux/mmiotrace.h
+F:     arch/x86/mm/kmmio.c
+F:     arch/x86/mm/mmio-mod.c
+F:     arch/x86/mm/testmmiotrace.c
+
 TRIVIAL PATCHES
 M:     Jiri Kosina <trivial@kernel.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/trivial.git
index 7466de60ddc7ab98c66c7a28f3cd4a502f465411..0f9cb36d45c2c59a589670679e7fa1d25ff9ee59 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Charred Weasel
 
 # *DOCUMENTATION*
index ec4791ea691196bf7137b99ead768b3cc030a056..a8767430df7d6012463c8a34366d251496d15364 100644 (file)
@@ -58,6 +58,9 @@ config GENERIC_CSUM
 config RWSEM_GENERIC_SPINLOCK
        def_bool y
 
+config ARCH_DISCONTIGMEM_ENABLE
+       def_bool y
+
 config ARCH_FLATMEM_ENABLE
        def_bool y
 
@@ -347,6 +350,15 @@ config ARC_HUGEPAGE_16M
 
 endchoice
 
+config NODES_SHIFT
+       int "Maximum NUMA Nodes (as a power of 2)"
+       default "1" if !DISCONTIGMEM
+       default "2" if DISCONTIGMEM
+       depends on NEED_MULTIPLE_NODES
+       ---help---
+         Accessing memory beyond 1GB (with or w/o PAE) requires 2 memory
+         zones.
+
 if ISA_ARCOMPACT
 
 config ARC_COMPACT_IRQ_LEVELS
@@ -455,6 +467,7 @@ config LINUX_LINK_BASE
 
 config HIGHMEM
        bool "High Memory Support"
+       select DISCONTIGMEM
        help
          With ARC 2G:2G address split, only upper 2G is directly addressable by
          kernel. Enable this to potentially allow access to rest of 2G and PAE
index 17f85c9c73cfe830a40280ad79bf9decb1b65a17..c22b181e8206f3162c4e0e19214f8b303f13c576 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
+#ifdef CONFIG_ISA_ARCV2
+#include <asm/barrier.h>
+#define __iormb()              rmb()
+#define __iowmb()              wmb()
+#else
+#define __iormb()              do { } while (0)
+#define __iowmb()              do { } while (0)
+#endif
+
 extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
 extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
                                  unsigned long flags);
@@ -31,6 +40,15 @@ extern void iounmap(const void __iomem *addr);
 #define ioremap_wc(phy, sz)            ioremap(phy, sz)
 #define ioremap_wt(phy, sz)            ioremap(phy, sz)
 
+/*
+ * io{read,write}{16,32}be() macros
+ */
+#define ioread16be(p)          ({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
+#define ioread32be(p)          ({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
+
+#define iowrite16be(v,p)       ({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
+#define iowrite32be(v,p)       ({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
+
 /* Change struct page to physical address */
 #define page_to_phys(page)             (page_to_pfn(page) << PAGE_SHIFT)
 
@@ -108,15 +126,6 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)
 
 }
 
-#ifdef CONFIG_ISA_ARCV2
-#include <asm/barrier.h>
-#define __iormb()              rmb()
-#define __iowmb()              wmb()
-#else
-#define __iormb()              do { } while (0)
-#define __iowmb()              do { } while (0)
-#endif
-
 /*
  * MMIO can also get buffered/optimized in micro-arch, so barriers needed
  * Based on ARM model for the typical use case
diff --git a/arch/arc/include/asm/mmzone.h b/arch/arc/include/asm/mmzone.h
new file mode 100644 (file)
index 0000000..8e97136
--- /dev/null
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_ARC_MMZONE_H
+#define _ASM_ARC_MMZONE_H
+
+#ifdef CONFIG_DISCONTIGMEM
+
+extern struct pglist_data node_data[];
+#define NODE_DATA(nid) (&node_data[nid])
+
+static inline int pfn_to_nid(unsigned long pfn)
+{
+       int is_end_low = 1;
+
+       if (IS_ENABLED(CONFIG_ARC_HAS_PAE40))
+               is_end_low = pfn <= virt_to_pfn(0xFFFFFFFFUL);
+
+       /*
+        * node 0: lowmem:             0x8000_0000   to 0xFFFF_FFFF
+        * node 1: HIGHMEM w/o  PAE40: 0x0           to 0x7FFF_FFFF
+        *         HIGHMEM with PAE40: 0x1_0000_0000 to ...
+        */
+       if (pfn >= ARCH_PFN_OFFSET && is_end_low)
+               return 0;
+
+       return 1;
+}
+
+static inline int pfn_valid(unsigned long pfn)
+{
+       int nid = pfn_to_nid(pfn);
+
+       return (pfn <= node_end_pfn(nid));
+}
+#endif /* CONFIG_DISCONTIGMEM  */
+
+#endif
index 36da89e2c853ef0551bfcee0f145454e23a81dd6..0d53854884d047557a981fece15936715a4cd950 100644 (file)
@@ -72,11 +72,20 @@ typedef unsigned long pgprot_t;
 
 typedef pte_t * pgtable_t;
 
+/*
+ * Use virt_to_pfn with caution:
+ * If used in pte or paddr related macros, it could cause truncation
+ * in PAE40 builds.
+ * As a rule of thumb, only use it in helpers starting with virt_
+ * You have been warned!
+ */
 #define virt_to_pfn(kaddr)     (__pa(kaddr) >> PAGE_SHIFT)
 
 #define ARCH_PFN_OFFSET                virt_to_pfn(CONFIG_LINUX_LINK_BASE)
 
+#ifdef CONFIG_FLATMEM
 #define pfn_valid(pfn)         (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)
+#endif
 
 /*
  * __pa, __va, virt_to_page (ALERT: deprecated, don't use them)
@@ -85,12 +94,10 @@ typedef pte_t * pgtable_t;
  * virt here means link-address/program-address as embedded in object code.
  * And for ARC, link-addr = physical address
  */
-#define __pa(vaddr)  ((unsigned long)vaddr)
+#define __pa(vaddr)  ((unsigned long)(vaddr))
 #define __va(paddr)  ((void *)((unsigned long)(paddr)))
 
-#define virt_to_page(kaddr)    \
-       (mem_map + virt_to_pfn((kaddr) - CONFIG_LINUX_LINK_BASE))
-
+#define virt_to_page(kaddr)    pfn_to_page(virt_to_pfn(kaddr))
 #define virt_addr_valid(kaddr)  pfn_valid(virt_to_pfn(kaddr))
 
 /* Default Permissions for stack/heaps pages (Non Executable) */
index 7d6c93e63adf3af60a0642b03b3ae798061b1e65..10d4b8b8e5450e83468f96f16d9ad18b5c62d3b0 100644 (file)
@@ -278,14 +278,13 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
 #define pmd_present(x)                 (pmd_val(x))
 #define pmd_clear(xp)                  do { pmd_val(*(xp)) = 0; } while (0)
 
-#define pte_page(pte)  \
-       (mem_map + virt_to_pfn(pte_val(pte) - CONFIG_LINUX_LINK_BASE))
-
+#define pte_page(pte)          pfn_to_page(pte_pfn(pte))
 #define mk_pte(page, prot)     pfn_pte(page_to_pfn(page), prot)
-#define pte_pfn(pte)           virt_to_pfn(pte_val(pte))
-#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | \
-                                pgprot_val(prot)))
-#define __pte_index(addr)      (virt_to_pfn(addr) & (PTRS_PER_PTE - 1))
+#define pfn_pte(pfn, prot)     (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+
+/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
+#define pte_pfn(pte)           (pte_val(pte) >> PAGE_SHIFT)
+#define __pte_index(addr)      (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
 /*
  * pte_offset gets a @ptr to PMD entry (PGD in our 2-tier paging system)
index 5487d0b974005621a8cfaf119bdbf1f687d9ff7b..8be930394750a7cf7039a9dcc85cf6ee1994f4e6 100644 (file)
@@ -30,11 +30,16 @@ static const unsigned long low_mem_start = CONFIG_LINUX_LINK_BASE;
 static unsigned long low_mem_sz;
 
 #ifdef CONFIG_HIGHMEM
-static unsigned long min_high_pfn;
+static unsigned long min_high_pfn, max_high_pfn;
 static u64 high_mem_start;
 static u64 high_mem_sz;
 #endif
 
+#ifdef CONFIG_DISCONTIGMEM
+struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
+EXPORT_SYMBOL(node_data);
+#endif
+
 /* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
 static int __init setup_mem_sz(char *str)
 {
@@ -109,13 +114,11 @@ void __init setup_arch_memory(void)
        /* Last usable page of low mem */
        max_low_pfn = max_pfn = PFN_DOWN(low_mem_start + low_mem_sz);
 
-#ifdef CONFIG_HIGHMEM
-       min_high_pfn = PFN_DOWN(high_mem_start);
-       max_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+#ifdef CONFIG_FLATMEM
+       /* pfn_valid() uses this */
+       max_mapnr = max_low_pfn - min_low_pfn;
 #endif
 
-       max_mapnr = max_pfn - min_low_pfn;
-
        /*------------- bootmem allocator setup -----------------------*/
 
        /*
@@ -129,7 +132,7 @@ void __init setup_arch_memory(void)
         * the crash
         */
 
-       memblock_add(low_mem_start, low_mem_sz);
+       memblock_add_node(low_mem_start, low_mem_sz, 0);
        memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -149,13 +152,6 @@ void __init setup_arch_memory(void)
        zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
        zones_holes[ZONE_NORMAL] = 0;
 
-#ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
-
-       /* This handles the peripheral address space hole */
-       zones_holes[ZONE_HIGHMEM] = min_high_pfn - max_low_pfn;
-#endif
-
        /*
         * We can't use the helper free_area_init(zones[]) because it uses
         * PAGE_OFFSET to compute the @min_low_pfn which would be wrong
@@ -168,6 +164,34 @@ void __init setup_arch_memory(void)
                            zones_holes);       /* holes */
 
 #ifdef CONFIG_HIGHMEM
+       /*
+        * Populate a new node with highmem
+        *
+        * On ARC (w/o PAE) HIGHMEM addresses are actually smaller (0 based)
+        * than addresses in normal, a.k.a. low memory (0x8000_0000 based).
+        * Even with PAE, the huge peripheral space hole would waste a lot of
+        * mem with a single mem_map[]. This warrants a mem_map per region design.
+        * Thus HIGHMEM on ARC is implemented with DISCONTIGMEM.
+        *
+        * DISCONTIGMEM in turn requires multiple nodes. Node 0 above is
+        * populated with the normal memory zone while node 1 only has highmem.
+        */
+       node_set_online(1);
+
+       min_high_pfn = PFN_DOWN(high_mem_start);
+       max_high_pfn = PFN_DOWN(high_mem_start + high_mem_sz);
+
+       zones_size[ZONE_NORMAL] = 0;
+       zones_holes[ZONE_NORMAL] = 0;
+
+       zones_size[ZONE_HIGHMEM] = max_high_pfn - min_high_pfn;
+       zones_holes[ZONE_HIGHMEM] = 0;
+
+       free_area_init_node(1,                  /* node-id */
+                           zones_size,         /* num pages per zone */
+                           min_high_pfn,       /* first pfn of node */
+                           zones_holes);       /* holes */
+
        high_memory = (void *)(min_high_pfn << PAGE_SHIFT);
        kmap_init();
 #endif
@@ -185,7 +209,7 @@ void __init mem_init(void)
        unsigned long tmp;
 
        reset_all_zones_managed_pages();
-       for (tmp = min_high_pfn; tmp < max_pfn; tmp++)
+       for (tmp = min_high_pfn; tmp < max_high_pfn; tmp++)
                free_highmem_page(pfn_to_page(tmp));
 #endif
 
index 0827d594b1f0ef3750146690c5ce11babf1fc65b..cd0cd5fd09a33bdf17b3f609d1e3497b446c9412 100644 (file)
 
                        pmc: pmc@fffffc00 {
                                compatible = "atmel,at91sam9x5-pmc", "syscon";
-                               reg = <0xfffffc00 0x100>;
+                               reg = <0xfffffc00 0x200>;
                                interrupts = <1 IRQ_TYPE_LEVEL_HIGH 7>;
                                interrupt-controller;
                                #address-cells = <1>;
index b3c26a96a7262bdc2356060f900cc9b0c8bbd8eb..d9e2d9c6e999e4aa6f3a4c8f582fe9caf74953f4 100644 (file)
        regulator-name = "V28";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <2800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on; /* due to battery cover sensor */
 };
 
        regulator-name = "VCSI";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux3 {
        regulator-name = "VMMC2_30";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <3000000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vaux4 {
        regulator-name = "VCAM_ANA_28";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <2800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc1 {
        regulator-name = "VMMC1";
        regulator-min-microvolt = <1850000>;
        regulator-max-microvolt = <3150000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vmmc2 {
        regulator-name = "V28_A";
        regulator-min-microvolt = <2800000>;
        regulator-max-microvolt = <3000000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on; /* due VIO leak to AIC34 VDDs */
 };
 
        regulator-name = "VPLL";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on;
 };
 
        regulator-name = "VSDI_CSI";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
        regulator-always-on;
 };
 
        regulator-name = "VMMC2_IO_18";
        regulator-min-microvolt = <1800000>;
        regulator-max-microvolt = <1800000>;
+       regulator-initial-mode = <0x0e>; /* RES_STATE_ACTIVE */
 };
 
 &vio {
index 387dc31822fe9c8b2729d28da389d410ffb55fe7..96f8ce7bd2afc0aa5507d81cf98e59b4eddd61ec 100644 (file)
@@ -46,7 +46,7 @@
                               0x480bd800 0x017c>;
                        interrupts = <24>;
                        iommus = <&mmu_isp>;
-                       syscon = <&scm_conf 0xdc>;
+                       syscon = <&scm_conf 0x6c>;
                        ti,phy-type = <OMAP3ISP_PHY_TYPE_COMPLEX_IO>;
                        #clock-cells = <1>;
                        ports {
index 902657d6713b073df82d33158d3d1cd0fac5ef16..914bf4c47404f641f823a122c26ff93127f8d25e 100644 (file)
                                ldo1_reg: ldo1 {
                                        /* VDDAPHY_CAM: vdda_csiport */
                                        regulator-name = "ldo1";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldo4_reg: ldo4 {
                                        /* VDDAPHY_DISP: vdda_dsiport/hdmi */
                                        regulator-name = "ldo4";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
index ecc591dc0778ddbf77222d417884387511d31f0d..4d87d9c6c86d85ec32ae1bc4444fdd06ed5b317e 100644 (file)
                                ldo1_reg: ldo1 {
                                        /* VDDAPHY_CAM: vdda_csiport */
                                        regulator-name = "ldo1";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
                                ldo4_reg: ldo4 {
                                        /* VDDAPHY_DISP: vdda_dsiport/hdmi */
                                        regulator-name = "ldo4";
-                                       regulator-min-microvolt = <1500000>;
+                                       regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
                                };
 
index 38805ebbe2ba49f5dcd2ee92d80cde1e324cbb56..120b6b80cd39eacc2c874768e89eb14ab147b531 100644 (file)
                        omap5_pmx_wkup: pinmux@c840 {
                                compatible = "ti,omap5-padconf",
                                             "pinctrl-single";
-                               reg = <0xc840 0x0038>;
+                               reg = <0xc840 0x003c>;
                                #address-cells = <1>;
                                #size-cells = <0>;
                                #interrupt-cells = <1>;
index 65d0e8d9825947c68b06e53b162d10116df084c0..04f541bffbdd52d677c77cc717d2d82c63f3c549 100644 (file)
                };
 
                sata0: sata@29000000 {
-                       compatible              = "generic-ahci";
+                       compatible              = "qcom,apq8064-ahci", "generic-ahci";
                        status                  = "disabled";
                        reg                     = <0x29000000 0x180>;
                        interrupts              = <GIC_SPI 209 IRQ_TYPE_NONE>;
 
                        phys                    = <&sata_phy0>;
                        phy-names               = "sata-phy";
+                       ports-implemented       = <0x1>;
                };
 
                /* Temporary fixed regulator */
index 78996bdbd3df38c30fb0a0051cd0db3e9a9249eb..9817090c1b731540a2dcd4cb3dc480608040fbfa 100644 (file)
                        status = "disabled";
 
                        nfc@c0000000 {
-                               compatible = "atmel,sama5d4-nfc";
+                               compatible = "atmel,sama5d3-nfc";
                                #address-cells = <1>;
                                #size-cells = <1>;
                                reg = < /* NFC Command Registers */
index 9d2b7e2f5975ed3dae7d9d107a5a0ed5c80369e9..346a49d805a7becd411cda921c7508edb35a3aa8 100644 (file)
 };
 
 &reg_dc1sw {
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
        regulator-name = "vcc-lcd";
 };
 
index fc8ba1663601e0743a05b7cda52703df9f9bc07a..99d9f630d6b6838954bde8ddbdd90b0b5ace17b8 100644 (file)
@@ -84,6 +84,7 @@
 
 #ifndef __ASSEMBLY__
 
+#ifdef CONFIG_CPU_CP15_MMU
 static inline unsigned int get_domain(void)
 {
        unsigned int domain;
@@ -103,6 +104,16 @@ static inline void set_domain(unsigned val)
          : : "r" (val) : "memory");
        isb();
 }
+#else
+static inline unsigned int get_domain(void)
+{
+       return 0;
+}
+
+static inline void set_domain(unsigned val)
+{
+}
+#endif
 
 #ifdef CONFIG_CPU_USE_DOMAINS
 #define modify_domain(dom,type)                                        \
index e0eea72deb87eb1ba3967b464a8848256565ee6c..a708fa1f090579228363f8a5f0427db16d31b3a1 100644 (file)
 #include <asm/mach/map.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
+#include <asm/ptrace.h>
 
 #ifdef CONFIG_EFI
 void efi_init(void);
 
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
 
-#define efi_call_virt(f, ...)                                          \
-({                                                                     \
-       efi_##f##_t *__f;                                               \
-       efi_status_t __s;                                               \
-                                                                       \
-       efi_virtmap_load();                                             \
-       __f = efi.systab->runtime->f;                                   \
-       __s = __f(__VA_ARGS__);                                         \
-       efi_virtmap_unload();                                           \
-       __s;                                                            \
-})
+#define arch_efi_call_virt_setup()     efi_virtmap_load()
+#define arch_efi_call_virt_teardown()  efi_virtmap_unload()
 
-#define __efi_call_virt(f, ...)                                                \
+#define arch_efi_call_virt(f, args...)                                 \
 ({                                                                     \
        efi_##f##_t *__f;                                               \
-                                                                       \
-       efi_virtmap_load();                                             \
        __f = efi.systab->runtime->f;                                   \
-       __f(__VA_ARGS__);                                               \
-       efi_virtmap_unload();                                           \
+       __f(args);                                                      \
 })
 
+#define ARCH_EFI_IRQ_FLAGS_MASK \
+       (PSR_J_BIT | PSR_E_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT | \
+        PSR_T_BIT | MODE_MASK)
+
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
        check_and_switch_context(mm, NULL);
@@ -59,7 +53,16 @@ void efi_virtmap_unload(void);
 
 /* arch specific definitions used by the stub code */
 
-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define efi_call_early(f, ...)         sys_table_arg->boottime->f(__VA_ARGS__)
+#define __efi_call_early(f, ...)       f(__VA_ARGS__)
+#define efi_is_64bit()                 (false)
+
+struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
+void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
+
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+}
 
 /*
  * A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
index ff8a9d8acfaca727517c76bbefe4bbdd4755f73a..9f43ba012d1077ef553617bff01cf002fba70613 100644 (file)
 #include <asm/mach/map.h>
 #include <asm/mmu_context.h>
 
+static int __init set_permissions(pte_t *ptep, pgtable_t token,
+                                 unsigned long addr, void *data)
+{
+       efi_memory_desc_t *md = data;
+       pte_t pte = *ptep;
+
+       if (md->attribute & EFI_MEMORY_RO)
+               pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
+       if (md->attribute & EFI_MEMORY_XP)
+               pte = set_pte_bit(pte, __pgprot(L_PTE_XN));
+       set_pte_ext(ptep, pte, PTE_EXT_NG);
+       return 0;
+}
+
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+                                      efi_memory_desc_t *md)
+{
+       unsigned long base, size;
+
+       base = md->virt_addr;
+       size = md->num_pages << EFI_PAGE_SHIFT;
+
+       /*
+        * We can only use apply_to_page_range() if we can guarantee that the
+        * entire region was mapped using pages. This should be the case if the
+        * region does not cover any naturally aligned SECTION_SIZE sized
+        * blocks.
+        */
+       if (round_down(base + size, SECTION_SIZE) <
+           round_up(base, SECTION_SIZE) + SECTION_SIZE)
+               return apply_to_page_range(mm, base, size, set_permissions, md);
+
+       return 0;
+}
+
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
        struct map_desc desc = {
@@ -34,5 +69,11 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
                desc.type = MT_DEVICE;
 
        create_mapping_late(mm, &desc, true);
+
+       /*
+        * If stricter permissions were specified, apply them now.
+        */
+       if (md->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))
+               return efi_set_mapping_permissions(mm, md);
        return 0;
 }
index 9b8c5a1134347aea35b7c60850d2b671efc9d768..fb1a69eb49c1a842fc604dc2b64f905e30a39aac 100644 (file)
@@ -236,7 +236,7 @@ ENTRY(__setup_mpu)
        mov     r0, #CONFIG_VECTORS_BASE        @ Cover from VECTORS_BASE
        ldr     r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
        /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */
-       mov     r6, #(((PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
+       mov     r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)
 
        setup_region r0, r5, r6, MPU_DATA_SIDE  @ VECTORS_BASE, PL0 NA, enabled
        beq     3f                              @ Memory-map not unified
index 2c4bea39cf224f8368cca2b4dba61a5b807a3dde..7d4e2850910ce4cdbeb3e669c7846d782dfe6c8c 100644 (file)
@@ -883,7 +883,8 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
                request_resource(&ioport_resource, &lp2);
 }
 
-#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
+#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
+    defined(CONFIG_EFI)
 struct screen_info screen_info = {
  .orig_video_lines     = 30,
  .orig_video_cols      = 80,
index 58dbd5c439df45bc10497954db0e61b433646cd6..d6d4191e68f23cd1f80d8faf95fa1405d156b30a 100644 (file)
@@ -1004,7 +1004,7 @@ static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
        kvm_pfn_t pfn = *pfnp;
        gfn_t gfn = *ipap >> PAGE_SHIFT;
 
-       if (PageTransCompound(pfn_to_page(pfn))) {
+       if (PageTransCompoundMap(pfn_to_page(pfn))) {
                unsigned long mask;
                /*
                 * The address we faulted on is backed by a transparent huge
index d97c588550ad467775dfe8b05487eb03d540c2f4..bc4e63fa9808932b451931d13906d69dbe2bd3ee 100644 (file)
@@ -121,6 +121,11 @@ static void read_factory_config(struct nvmem_device *nvmem, void *context)
        const char *partnum = NULL;
        struct davinci_soc_info *soc_info = &davinci_soc_info;
 
+       if (!IS_BUILTIN(CONFIG_NVMEM)) {
+               pr_warn("Factory Config not available without CONFIG_NVMEM\n");
+               goto bad_config;
+       }
+
        ret = nvmem_device_read(nvmem, 0, sizeof(factory_config),
                                &factory_config);
        if (ret != sizeof(struct factory_config)) {
index f55ef2ef2f92eb88c9c7485e5bfda537af126d31..742133b7266a64d7ca551a475f64e82ff2fa3841 100644 (file)
@@ -33,6 +33,11 @@ void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context)
        char *mac_addr = davinci_soc_info.emac_pdata->mac_addr;
        off_t offset = (off_t)context;
 
+       if (!IS_BUILTIN(CONFIG_NVMEM)) {
+               pr_warn("Cannot read MAC addr from EEPROM without CONFIG_NVMEM\n");
+               return;
+       }
+
        /* Read MAC addr from EEPROM */
        if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN)
                pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr);
index 7c21760f590ffd0d4cd47fcafcbcaffe64a85952..875a2bab64f67b1a7dda20e216a264f01679f8a9 100644 (file)
@@ -92,7 +92,7 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
                        if (IS_ERR(pd->clk[i]))
                                break;
 
-                       if (IS_ERR(pd->clk[i]))
+                       if (IS_ERR(pd->pclk[i]))
                                continue; /* Skip on first power up */
                        if (clk_set_parent(pd->clk[i], pd->pclk[i]))
                                pr_err("%s: error setting parent to clock%d\n",
index 5d94b7a2fb108dc1bc1c5fcdd4edfa80ab2c0c7f..c160fa3007e943be451b82318149a69dce4d7e54 100644 (file)
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 
        .arch   armv7-a
+       .arm
 
 ENTRY(secondary_trampoline)
        /* CPU1 will always fetch from 0x0 when it is brought out of reset.
index 1dd10936d68d0422b328128fbb6a234d77adee1d..d5805e4bf2fc71a0db019c198daedccb847a8376 100644 (file)
@@ -87,7 +87,6 @@ static unsigned long irbar_read(void)
 /* MPU initialisation functions */
 void __init sanity_check_meminfo_mpu(void)
 {
-       int i;
        phys_addr_t phys_offset = PHYS_OFFSET;
        phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
        struct memblock_region *reg;
@@ -110,11 +109,13 @@ void __init sanity_check_meminfo_mpu(void)
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
-                        * all blocks afterwards
+                        * all blocks afterwards in one go (we can't remove
+                        * blocks separately while iterating)
                         */
                        pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
-                                 &mem_start, &reg->base);
-                       memblock_remove(reg->base, reg->size);
+                                 &mem_end, &reg->base);
+                       memblock_remove(reg->base, 0 - reg->base);
+                       break;
                }
        }
 
@@ -144,7 +145,7 @@ void __init sanity_check_meminfo_mpu(void)
                pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
                                &specified_mem_size, &aligned_region_size);
                memblock_remove(mem_start + aligned_region_size,
-                               specified_mem_size - aligned_round_size);
+                               specified_mem_size - aligned_region_size);
 
                mem_end = mem_start + aligned_region_size;
        }
@@ -261,7 +262,7 @@ void __init mpu_setup(void)
                return;
 
        region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
-                                       ilog2(meminfo.bank[0].size),
+                                       ilog2(memblock.memory.regions[0].size),
                                        MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
        if (region_err) {
                panic("MPU region initialization failure! %d", region_err);
@@ -285,7 +286,7 @@ void __init arm_mm_memblock_reserve(void)
         * some architectures which the DRAM is the exception vector to trap,
         * alloc_page breaks with error, although it is not NULL, but "0."
         */
-       memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
+       memblock_reserve(CONFIG_VECTORS_BASE, 2 * PAGE_SIZE);
 #else /* ifndef CONFIG_CPU_V7M */
        /*
         * There is no dedicated vector page on V7-M. So nothing needs to be
index efa77c146415b64d774a9811d30268e690d354d1..521b1ec5915759f61e0ef1ab6e8328ca0cd62256 100644 (file)
@@ -2,6 +2,7 @@ menu "Platform selection"
 
 config ARCH_SUNXI
        bool "Allwinner sunxi 64-bit SoC Family"
+       select GENERIC_IRQ_CHIP
        help
          This enables support for Allwinner sunxi based SoCs like the A64.
 
index a7315ebe3883f276456280de64d31537099cd292..706d2426024f7f0665c47dc37b92f566cb87a726 100644 (file)
                compatible = "fixed-clock";
                #clock-cells = <0>;
                clock-frequency = <0>;
-               status = "disabled";
        };
 
        soc {
index 8e88a696c9cbcbd2c8f717ea1c929b87750f1b03..622db3c6474e2d5c51b3a1689869534cf16019ee 100644 (file)
@@ -4,6 +4,7 @@
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
+#include <asm/ptrace.h>
 #include <asm/tlbflush.h>
 
 #ifdef CONFIG_EFI
@@ -14,32 +15,29 @@ extern void efi_init(void);
 
 int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 
-#define efi_call_virt(f, ...)                                          \
+#define efi_set_mapping_permissions    efi_create_mapping
+
+#define arch_efi_call_virt_setup()                                     \
 ({                                                                     \
-       efi_##f##_t *__f;                                               \
-       efi_status_t __s;                                               \
-                                                                       \
        kernel_neon_begin();                                            \
        efi_virtmap_load();                                             \
-       __f = efi.systab->runtime->f;                                   \
-       __s = __f(__VA_ARGS__);                                         \
-       efi_virtmap_unload();                                           \
-       kernel_neon_end();                                              \
-       __s;                                                            \
 })
 
-#define __efi_call_virt(f, ...)                                                \
+#define arch_efi_call_virt(f, args...)                                 \
 ({                                                                     \
        efi_##f##_t *__f;                                               \
-                                                                       \
-       kernel_neon_begin();                                            \
-       efi_virtmap_load();                                             \
        __f = efi.systab->runtime->f;                                   \
-       __f(__VA_ARGS__);                                               \
+       __f(args);                                                      \
+})
+
+#define arch_efi_call_virt_teardown()                                  \
+({                                                                     \
        efi_virtmap_unload();                                           \
        kernel_neon_end();                                              \
 })
 
+#define ARCH_EFI_IRQ_FLAGS_MASK (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)
+
 /* arch specific definitions used by the stub code */
 
 /*
@@ -50,7 +48,16 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 #define EFI_FDT_ALIGN  SZ_2M   /* used by allocate_new_fdt_and_exit_boot() */
 #define MAX_FDT_OFFSET SZ_512M
 
-#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+#define efi_call_early(f, ...)         sys_table_arg->boottime->f(__VA_ARGS__)
+#define __efi_call_early(f, ...)       f(__VA_ARGS__)
+#define efi_is_64bit()                 (true)
+
+#define alloc_screen_info(x...)                &screen_info
+#define free_screen_info(x...)
+
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+}
 
 #define EFI_ALLOC_ALIGN                SZ_64K
 
index b6abc852f2a142123150662bc6dd6bf5c3de62af..78f52488f9ff82461f2933689b44e7b0fcf74b80 100644 (file)
 
 #include <asm/efi.h>
 
-int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+/*
+ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+ * executable, everything else can be mapped with the XN bits
+ * set. Also take the new (optional) RO/XP bits into account.
+ */
+static __init pteval_t create_mapping_protection(efi_memory_desc_t *md)
 {
-       pteval_t prot_val;
+       u64 attr = md->attribute;
+       u32 type = md->type;
 
-       /*
-        * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
-        * executable, everything else can be mapped with the XN bits
-        * set.
-        */
-       if ((md->attribute & EFI_MEMORY_WB) == 0)
-               prot_val = PROT_DEVICE_nGnRE;
-       else if (md->type == EFI_RUNTIME_SERVICES_CODE ||
-                !PAGE_ALIGNED(md->phys_addr))
-               prot_val = pgprot_val(PAGE_KERNEL_EXEC);
-       else
-               prot_val = pgprot_val(PAGE_KERNEL);
+       if (type == EFI_MEMORY_MAPPED_IO)
+               return PROT_DEVICE_nGnRE;
+
+       if (WARN_ONCE(!PAGE_ALIGNED(md->phys_addr),
+                     "UEFI Runtime regions are not aligned to 64 KB -- buggy firmware?"))
+               /*
+                * If the region is not aligned to the page size of the OS, we
+                * can not use strict permissions, since that would also affect
+                * the mapping attributes of the adjacent regions.
+                */
+               return pgprot_val(PAGE_KERNEL_EXEC);
+
+       /* R-- */
+       if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
+           (EFI_MEMORY_XP | EFI_MEMORY_RO))
+               return pgprot_val(PAGE_KERNEL_RO);
+
+       /* R-X */
+       if (attr & EFI_MEMORY_RO)
+               return pgprot_val(PAGE_KERNEL_ROX);
+
+       /* RW- */
+       if (attr & EFI_MEMORY_XP || type != EFI_RUNTIME_SERVICES_CODE)
+               return pgprot_val(PAGE_KERNEL);
+
+       /* RWX */
+       return pgprot_val(PAGE_KERNEL_EXEC);
+}
+
+/* we will fill this structure from the stub, so don't put it in .bss */
+struct screen_info screen_info __section(.data);
+
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+{
+       pteval_t prot_val = create_mapping_protection(md);
 
        create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
                           md->num_pages << EFI_PAGE_SHIFT,
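
The new create_mapping_protection() above reduces to a small decision table over the descriptor's RO/XP attributes. A stand-alone sketch of that table (user-space C; the perms() helper is hypothetical, the attribute bit values are the ones defined by the UEFI spec):

#include <stdio.h>

#define EFI_MEMORY_XP (1ULL << 14)      /* execute-protect */
#define EFI_MEMORY_RO (1ULL << 17)      /* write-protect */

static const char *perms(unsigned long long attr, int is_runtime_code)
{
        if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
            (EFI_MEMORY_XP | EFI_MEMORY_RO))
                return "R--";                   /* PAGE_KERNEL_RO */
        if (attr & EFI_MEMORY_RO)
                return "R-X";                   /* PAGE_KERNEL_ROX */
        if ((attr & EFI_MEMORY_XP) || !is_runtime_code)
                return "RW-";                   /* PAGE_KERNEL */
        return "RWX";                           /* PAGE_KERNEL_EXEC */
}

int main(void)
{
        printf("%s\n", perms(EFI_MEMORY_XP | EFI_MEMORY_RO, 1)); /* R-- */
        printf("%s\n", perms(0, 1));    /* RWX: firmware gave no hints for a code region */
        printf("%s\n", perms(0, 0));    /* RW-: ordinary data region */
        return 0;
}
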
index 5e360ce88f10ba4110697552a47ea8c42053a02f..1428849aece8f98a52b4661c68c14ea555fe0000 100644 (file)
@@ -112,6 +112,7 @@ __efistub___memset          = KALLSYMS_HIDE(__pi_memset);
 __efistub__text                        = KALLSYMS_HIDE(_text);
 __efistub__end                 = KALLSYMS_HIDE(_end);
 __efistub__edata               = KALLSYMS_HIDE(_edata);
+__efistub_screen_info          = KALLSYMS_HIDE(screen_info);
 
 #endif
 
index a34420a5df9a2e5134beb26964f08c57d97c0d16..b405bbb544319e2a9a66ced51c12eaabb5621d25 100644 (file)
@@ -476,6 +476,7 @@ emit_cond_jmp:
                case BPF_JGE:
                        jmp_cond = A64_COND_CS;
                        break;
+               case BPF_JSET:
                case BPF_JNE:
                        jmp_cond = A64_COND_NE;
                        break;
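
For reference, BPF_JSET means "jump if (dst & src) != 0". Assuming the JIT emits a flag-setting AND (TST) for this comparison, as the surrounding code in this file does for the other test-style jumps, the "result non-zero" condition is exactly A64_COND_NE, so sharing the BPF_JNE case is correct. Spelled out in C:

#include <stdio.h>

/* BPF_JSET taken-branch condition. */
static int bpf_jset_taken(unsigned long long dst, unsigned long long src_or_imm)
{
        return (dst & src_or_imm) != 0;         /* TST sets flags like ANDS; "!= 0" is NE */
}

int main(void)
{
        printf("%d %d\n", bpf_jset_taken(0xf0, 0x10), bpf_jset_taken(0xf0, 0x0f)); /* 1 0 */
        return 0;
}
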
index 300dac3702f11a13460d1954843bfcbd1ddc2034..bf0865cd438a4a25b2c516274ffed969a1dc64b3 100644 (file)
@@ -531,8 +531,6 @@ efi_init (void)
               efi.systab->hdr.revision >> 16,
               efi.systab->hdr.revision & 0xffff, vendor);
 
-       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
-
        palo_phys      = EFI_INVALID_TABLE_ADDR;
 
        if (efi_config_init(arch_tables) != 0)
index c976ebfe2269db83e7efcc803bcd14e390695e3b..57b4836b7ecd898e10197aa0d473ea6107b9fb42 100644 (file)
@@ -344,7 +344,7 @@ tracesys_next:
 #endif
 
        cmpib,COND(=),n -1,%r20,tracesys_exit /* seccomp may have returned -1 */
-       comiclr,>>=     __NR_Linux_syscalls, %r20, %r0
+       comiclr,>>      __NR_Linux_syscalls, %r20, %r0
        b,n     .Ltracesys_nosys
 
        LDREGX  %r20(%r19), %r19
index e4396a7d0f7cf5627a92ea8c07756aba6bc52c7a..4afe66aa1400d5d7e3783e696a0a1de12d3a316c 100644 (file)
@@ -82,7 +82,7 @@ static inline unsigned long create_zero_mask(unsigned long bits)
            "andc       %1,%1,%2\n\t"
            "popcntd    %0,%1"
                : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
-               : "r" (bits));
+               : "b" (bits));
 
        return leading_zero_bits;
 }
index fb23fd6b186a1b0ffeeace7cca9e9a5a136fbfa6..c74d3701ad6830fe88338c185be374f7a6730f07 100644 (file)
@@ -24,7 +24,6 @@ CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
 # CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
 CONFIG_INET6_AH=m
 CONFIG_INET6_ESP=m
 CONFIG_INET6_IPCOMP=m
index 04920ab8e292b75a99986b1a2d10ac6273e2db94..3583d676a9161f8197e826553162f011f390c1cd 100644 (file)
@@ -48,7 +48,6 @@ CONFIG_SYN_COOKIES=y
 CONFIG_INET_AH=y
 CONFIG_INET_ESP=y
 CONFIG_INET_IPCOMP=y
-CONFIG_IPV6_PRIVACY=y
 CONFIG_IPV6_ROUTER_PREF=y
 CONFIG_IPV6_ROUTE_INFO=y
 CONFIG_IPV6_OPTIMISTIC_DAD=y
index 56f933816144d47bf96506d09d3249445bc0fe32..1d8321c827a8821bb4e9f4989eb883cd761370db 100644 (file)
@@ -48,6 +48,7 @@
 #define SUN4V_CHIP_SPARC_M6    0x06
 #define SUN4V_CHIP_SPARC_M7    0x07
 #define SUN4V_CHIP_SPARC64X    0x8a
+#define SUN4V_CHIP_SPARC_SN    0x8b
 #define SUN4V_CHIP_UNKNOWN     0xff
 
 #ifndef __ASSEMBLY__
index b6de8b10a55b8b8f09eedb2c90d86906d5445686..36eee8132c22bac329e99fb7284211e11310ff26 100644 (file)
 #define __NR_setsockopt                355
 #define __NR_mlock2            356
 #define __NR_copy_file_range   357
+#define __NR_preadv2           358
+#define __NR_pwritev2          359
 
-#define NR_syscalls            358
+#define NR_syscalls            360
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
index 4ee1ad420862d425cff03ba7aad8e395fcb75907..655628def68e6be60b7cd31bda369b2fbbfd6c6b 100644 (file)
@@ -214,8 +214,7 @@ do_dcpe_tl1_nonfatal:       /* Ok we may use interrupt globals safely. */
        subcc           %g1, %g2, %g1           ! Next cacheline
        bge,pt          %icc, 1b
         nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
+       ba,a,pt         %xcc, dcpe_icpe_tl1_common
 
 do_dcpe_tl1_fatal:
        sethi           %hi(1f), %g7
@@ -224,8 +223,7 @@ do_dcpe_tl1_fatal:
        mov             0x2, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_dcpe_tl1,.-do_dcpe_tl1
 
        .globl          do_icpe_tl1
@@ -259,8 +257,7 @@ do_icpe_tl1_nonfatal:       /* Ok we may use interrupt globals safely. */
        subcc           %g1, %g2, %g1
        bge,pt          %icc, 1b
         nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
+       ba,a,pt         %xcc, dcpe_icpe_tl1_common
 
 do_icpe_tl1_fatal:
        sethi           %hi(1f), %g7
@@ -269,8 +266,7 @@ do_icpe_tl1_fatal:
        mov             0x3, %o0
        call            cheetah_plus_parity_error
         add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_icpe_tl1,.-do_icpe_tl1
        
        .type           dcpe_icpe_tl1_common,#function
@@ -456,7 +452,7 @@ __cheetah_log_error:
         cmp            %g2, 0x63
        be              c_cee
         nop
-       ba,pt           %xcc, c_deferred
+       ba,a,pt         %xcc, c_deferred
        .size           __cheetah_log_error,.-__cheetah_log_error
 
        /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
index dfad8b1aea9fb042a40290c766a3bb7ede4e0480..493e023a468a919c61d77451e43e0a4a2e414bbe 100644 (file)
@@ -506,6 +506,12 @@ static void __init sun4v_cpu_probe(void)
                sparc_pmu_type = "sparc-m7";
                break;
 
+       case SUN4V_CHIP_SPARC_SN:
+               sparc_cpu_type = "SPARC-SN";
+               sparc_fpu_type = "SPARC-SN integrated FPU";
+               sparc_pmu_type = "sparc-sn";
+               break;
+
        case SUN4V_CHIP_SPARC64X:
                sparc_cpu_type = "SPARC64-X";
                sparc_fpu_type = "SPARC64-X integrated FPU";
index e69ec0e3f15527705b3ce28fc89ff5c0341f03fb..45c820e1cba5d949ff936f15392ca3c0c8578a34 100644 (file)
@@ -328,6 +328,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
        case SUN4V_CHIP_NIAGARA5:
        case SUN4V_CHIP_SPARC_M6:
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
        case SUN4V_CHIP_SPARC64X:
                rover_inc_table = niagara_iterate_method;
                break;
index a6864826a4bd963f0188bd1f5d64218cbdee1981..336d2750fe78c3d4d79175e3427e94859e0cca95 100644 (file)
@@ -100,8 +100,8 @@ do_fpdis:
        fmuld           %f0, %f2, %f26
        faddd           %f0, %f2, %f28
        fmuld           %f0, %f2, %f30
-       b,pt            %xcc, fpdis_exit
-        nop
+       ba,a,pt         %xcc, fpdis_exit
+
 2:     andcc           %g5, FPRS_DU, %g0
        bne,pt          %icc, 3f
         fzero          %f32
@@ -144,8 +144,8 @@ do_fpdis:
        fmuld           %f32, %f34, %f58
        faddd           %f32, %f34, %f60
        fmuld           %f32, %f34, %f62
-       ba,pt           %xcc, fpdis_exit
-        nop
+       ba,a,pt         %xcc, fpdis_exit
+
 3:     mov             SECONDARY_CONTEXT, %g3
        add             %g6, TI_FPREGS, %g1
 
@@ -197,8 +197,7 @@ fpdis_exit2:
 fp_other_bounce:
        call            do_fpother
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           fp_other_bounce,.-fp_other_bounce
 
        .align          32
index cd1f592cd3479f8c94c599bfc589087184d4d180..a076b4249e622a4ebfe47eab6c9bcd7cb98b36ce 100644 (file)
@@ -414,6 +414,8 @@ sun4v_chip_type:
        cmp     %g2, 'T'
        be,pt   %xcc, 70f
         cmp    %g2, 'M'
+       be,pt   %xcc, 70f
+        cmp    %g2, 'S'
        bne,pn  %xcc, 49f
         nop
 
@@ -433,6 +435,9 @@ sun4v_chip_type:
        cmp     %g2, '7'
        be,pt   %xcc, 5f
         mov    SUN4V_CHIP_SPARC_M7, %g4
+       cmp     %g2, 'N'
+       be,pt   %xcc, 5f
+        mov    SUN4V_CHIP_SPARC_SN, %g4
        ba,pt   %xcc, 49f
         nop
 
@@ -461,9 +466,8 @@ sun4v_chip_type:
        subcc   %g3, 1, %g3
        bne,pt  %xcc, 41b
        add     %g1, 1, %g1
-       mov     SUN4V_CHIP_SPARC64X, %g4
        ba,pt   %xcc, 5f
-       nop
+        mov    SUN4V_CHIP_SPARC64X, %g4
 
 49:
        mov     SUN4V_CHIP_UNKNOWN, %g4
@@ -548,8 +552,7 @@ sun4u_init:
        stxa            %g0, [%g7] ASI_DMMU
        membar  #Sync
 
-       ba,pt           %xcc, sun4u_continue
-        nop
+       ba,a,pt         %xcc, sun4u_continue
 
 sun4v_init:
        /* Set ctx 0 */
@@ -560,14 +563,12 @@ sun4v_init:
        mov             SECONDARY_CONTEXT, %g7
        stxa            %g0, [%g7] ASI_MMU
        membar          #Sync
-       ba,pt           %xcc, niagara_tlb_fixup
-        nop
+       ba,a,pt         %xcc, niagara_tlb_fixup
 
 sun4u_continue:
        BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
 
-       ba,pt   %xcc, spitfire_tlb_fixup
-        nop
+       ba,a,pt %xcc, spitfire_tlb_fixup
 
 niagara_tlb_fixup:
        mov     3, %g2          /* Set TLB type to hypervisor. */
@@ -595,6 +596,9 @@ niagara_tlb_fixup:
        be,pt   %xcc, niagara4_patch
         nop
        cmp     %g1, SUN4V_CHIP_SPARC_M7
+       be,pt   %xcc, niagara4_patch
+        nop
+       cmp     %g1, SUN4V_CHIP_SPARC_SN
        be,pt   %xcc, niagara4_patch
         nop
 
@@ -639,8 +643,7 @@ niagara_patch:
        call    hypervisor_patch_cachetlbops
         nop
 
-       ba,pt   %xcc, tlb_fixup_done
-        nop
+       ba,a,pt %xcc, tlb_fixup_done
 
 cheetah_tlb_fixup:
        mov     2, %g2          /* Set TLB type to cheetah+. */
@@ -659,8 +662,7 @@ cheetah_tlb_fixup:
        call    cheetah_patch_cachetlbops
         nop
 
-       ba,pt   %xcc, tlb_fixup_done
-        nop
+       ba,a,pt %xcc, tlb_fixup_done
 
 spitfire_tlb_fixup:
        /* Set TLB type to spitfire. */
@@ -774,8 +776,7 @@ setup_trap_table:
        call    %o1
         add    %sp, (2047 + 128), %o0
 
-       ba,pt   %xcc, 2f
-        nop
+       ba,a,pt %xcc, 2f
 
 1:     sethi   %hi(sparc64_ttable_tl0), %o0
        set     prom_set_trap_table_name, %g2
@@ -814,8 +815,7 @@ setup_trap_table:
 
        BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
 
-       ba,pt   %xcc, 2f
-        nop
+       ba,a,pt %xcc, 2f
 
        /* Disable STICK_INT interrupts. */
 1:
index 753b4f031bfb710b77cefa78b74ade61a9d8850c..34b4933900bf7665b30e791565795d5782351b60 100644 (file)
@@ -18,8 +18,7 @@ __do_privact:
 109:   or              %g7, %lo(109b), %g7
        call            do_privact
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __do_privact,.-__do_privact
 
        .type           do_mna,#function
@@ -46,8 +45,7 @@ do_mna:
        mov             %l5, %o2
        call            mem_address_unaligned
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_mna,.-do_mna
 
        .type           do_lddfmna,#function
@@ -65,8 +63,7 @@ do_lddfmna:
        mov             %l5, %o2
        call            handle_lddfmna
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_lddfmna,.-do_lddfmna
 
        .type           do_stdfmna,#function
@@ -84,8 +81,7 @@ do_stdfmna:
        mov             %l5, %o2
        call            handle_stdfmna
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           do_stdfmna,.-do_stdfmna
 
        .type           breakpoint_trap,#function
index badf0951d73c8f2d875aa5cbc093caef04616aeb..c2b202d763a16ae74d1d150ed6e7dd940341d7dd 100644 (file)
@@ -245,6 +245,18 @@ static void pci_parse_of_addrs(struct platform_device *op,
        }
 }
 
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+                                 void *stc, void *host_controller,
+                                 struct platform_device  *op,
+                                 int numa_node)
+{
+       sd->iommu = iommu;
+       sd->stc = stc;
+       sd->host_controller = host_controller;
+       sd->op = op;
+       sd->numa_node = numa_node;
+}
+
 static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
                                         struct device_node *node,
                                         struct pci_bus *bus, int devfn)
@@ -259,13 +271,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
        if (!dev)
                return NULL;
 
+       op = of_find_device_by_node(node);
        sd = &dev->dev.archdata;
-       sd->iommu = pbm->iommu;
-       sd->stc = &pbm->stc;
-       sd->host_controller = pbm;
-       sd->op = op = of_find_device_by_node(node);
-       sd->numa_node = pbm->numa_node;
-
+       pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+                             pbm->numa_node);
        sd = &op->dev.archdata;
        sd->iommu = pbm->iommu;
        sd->stc = &pbm->stc;
@@ -994,6 +1003,27 @@ void pcibios_set_master(struct pci_dev *dev)
        /* No special bus mastering setup handling */
 }
 
+#ifdef CONFIG_PCI_IOV
+int pcibios_add_device(struct pci_dev *dev)
+{
+       struct pci_dev *pdev;
+
+       /* Add sriov arch specific initialization here.
+        * Copy dev_archdata from PF to VF
+        */
+       if (dev->is_virtfn) {
+               struct dev_archdata *psd;
+
+               pdev = dev->physfn;
+               psd = &pdev->dev.archdata;
+               pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+                                     psd->stc, psd->host_controller, NULL,
+                                     psd->numa_node);
+       }
+       return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
 static int __init pcibios_init(void)
 {
        pci_dfl_cache_line_size = 64 >> 2;
index 26db95b54ee94c44537590a1b94a4870bd9227ce..599f1207eed2e469157e2475631d2537543b5794 100644 (file)
@@ -285,7 +285,8 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
-       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+           sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
                sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                         &__sun_m7_2insn_patch_end);
 
@@ -524,6 +525,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_BLKINIT;
                if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
@@ -532,6 +534,7 @@ static void __init init_sparc64_elf_hwcap(void)
                    sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                   sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                    sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                        cap |= HWCAP_SPARC_N2;
        }
@@ -561,6 +564,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
                                        AV_SPARC_ASI_BLK_INIT |
@@ -570,6 +574,7 @@ static void __init init_sparc64_elf_hwcap(void)
                            sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+                           sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
                            sun4v_chip_type == SUN4V_CHIP_SPARC64X)
                                cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
                                        AV_SPARC_FMAF);
index c357e40ffd01526ca38a824c12ac077cdba135c6..4a73009f66a5727e43ad45dbe1a2f7bded8cd48f 100644 (file)
@@ -85,8 +85,7 @@ __spitfire_cee_trap_continue:
        ba,pt           %xcc, etraptl1
         rd             %pc, %g7
 
-       ba,pt           %xcc, 2f
-        nop
+       ba,a,pt         %xcc, 2f
 
 1:     ba,pt           %xcc, etrap_irq
         rd             %pc, %g7
@@ -100,8 +99,7 @@ __spitfire_cee_trap_continue:
        mov             %l5, %o2
        call            spitfire_access_error
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_access_error,.-__spitfire_access_error
 
        /* This is the trap handler entry point for ECC correctable
@@ -179,8 +177,7 @@ __spitfire_data_access_exception_tl1:
        mov             %l5, %o2
        call            spitfire_data_access_exception_tl1
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
 
        .type           __spitfire_data_access_exception,#function
@@ -200,8 +197,7 @@ __spitfire_data_access_exception:
        mov             %l5, %o2
        call            spitfire_data_access_exception
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_data_access_exception,.-__spitfire_data_access_exception
 
        .type           __spitfire_insn_access_exception_tl1,#function
@@ -220,8 +216,7 @@ __spitfire_insn_access_exception_tl1:
        mov             %l5, %o2
        call            spitfire_insn_access_exception_tl1
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
 
        .type           __spitfire_insn_access_exception,#function
@@ -240,6 +235,5 @@ __spitfire_insn_access_exception:
        mov             %l5, %o2
        call            spitfire_insn_access_exception
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
        .size           __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
index 6c3dd6c52f8bd09135e81f1d56704602d10c4f4e..eac7f0db5c8c6269a913152a11941f66f5f3e8d0 100644 (file)
@@ -88,4 +88,4 @@ sys_call_table:
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
 /*345*/        .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range
+/*355*/        .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
index 12b524cfcfa0120caabdf7aa5b8606ecc3978c96..b0f17ff2ddba2daa75a8e8a646b5661dfe0d36e9 100644 (file)
@@ -89,7 +89,7 @@ sys_call_table32:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
+       .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
 
 #endif /* CONFIG_COMPAT */
 
@@ -170,4 +170,4 @@ sys_call_table:
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
        .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
 /*350*/        .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-       .word sys_setsockopt, sys_mlock2, sys_copy_file_range
+       .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
index b7f0f3f3a909b05b544da1a7b3d3e21d12bfa357..c731e8023d3e7c6333ae091f39ca76aedf332603 100644 (file)
@@ -11,8 +11,7 @@ utrap_trap:           /* %g3=handler,%g4=level */
        mov             %l4, %o1
         call           bad_trap
         add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
+       ba,a,pt         %xcc, rtrap
 
 invoke_utrap:
        sllx            %g3, 3, %g3
index cb5789c9f9613ed692733d50dcf0e2c39784b1f7..f6bb857254fcfa170155d4cd8dc8cb717c5bfb97 100644 (file)
@@ -45,6 +45,14 @@ static const struct vio_device_id *vio_match_device(
        return NULL;
 }
 
+static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+{
+       const struct vio_dev *vio_dev = to_vio_dev(dev);
+
+       add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
+       return 0;
+}
+
 static int vio_bus_match(struct device *dev, struct device_driver *drv)
 {
        struct vio_dev *vio_dev = to_vio_dev(dev);
@@ -105,15 +113,25 @@ static ssize_t type_show(struct device *dev,
        return sprintf(buf, "%s\n", vdev->type);
 }
 
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
+{
+       const struct vio_dev *vdev = to_vio_dev(dev);
+
+       return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
+}
+
 static struct device_attribute vio_dev_attrs[] = {
        __ATTR_RO(devspec),
        __ATTR_RO(type),
+       __ATTR_RO(modalias),
        __ATTR_NULL
 };
 
 static struct bus_type vio_bus_type = {
        .name           = "vio",
        .dev_attrs      = vio_dev_attrs,
+       .uevent         = vio_hotplug,
        .match          = vio_bus_match,
        .probe          = vio_device_probe,
        .remove         = vio_device_remove,
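
The two additions above expose the same "vio:T<type>S<compat>" string both through the uevent environment and through a modalias sysfs attribute, which is what lets udev/modprobe autoload vio drivers. A tiny sketch of the string being built (the device type and compat values here are made up for illustration):

#include <stdio.h>

int main(void)
{
        char modalias[64];

        snprintf(modalias, sizeof(modalias), "vio:T%sS%s", "vdc-port", "block");
        printf("%s\n", modalias);               /* vio:Tvdc-portSblock */
        return 0;
}
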
index aadd321aa05db983af75a13ee14f2693ad75ed5c..7d02b1fef0256bb84c218e55b4d2328d037f5903 100644 (file)
@@ -33,6 +33,10 @@ ENTRY(_start)
 jiffies = jiffies_64;
 #endif
 
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
 SECTIONS
 {
 #ifdef CONFIG_SPARC64
index 1e67ce95836972d439b50c88aa77d8395a543652..855019a8590ea5d556b71c9ae9356f7dcb629588 100644 (file)
@@ -32,8 +32,7 @@ fill_fixup:
         rd     %pc, %g7
        call    do_sparc64_fault
         add    %sp, PTREGS_OFF, %o0
-       ba,pt   %xcc, rtrap
-        nop
+       ba,a,pt %xcc, rtrap
 
        /* Be very careful about usage of the trap globals here.
         * You cannot touch %g5 as that has the fault information.
index 1cfe6aab7a11572d54f6848fe4656b7b40af8cf2..09e838801e397b4071b6604852e24a2a799e0a86 100644 (file)
@@ -1769,6 +1769,7 @@ static void __init setup_page_offset(void)
                        max_phys_bits = 47;
                        break;
                case SUN4V_CHIP_SPARC_M7:
+               case SUN4V_CHIP_SPARC_SN:
                default:
                        /* M7 and later support 52-bit virtual addresses.  */
                        sparc64_va_hole_top =    0xfff8000000000000UL;
@@ -1986,6 +1987,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
                pagecv_flag = 0x00;
                break;
        default:
@@ -2138,6 +2140,7 @@ void __init paging_init(void)
         */
        switch (sun4v_chip_type) {
        case SUN4V_CHIP_SPARC_M7:
+       case SUN4V_CHIP_SPARC_SN:
                page_cache4v_flag = _PAGE_CP_4V;
                break;
        default:
index 583d539a41977a2c7397b9fd38e0839d3c41dbb9..52fef606bc54258b7095aa6b4b16eda8fbf16244 100644 (file)
@@ -571,312 +571,6 @@ free_handle:
        efi_call_early(free_pool, pci_handle);
 }
 
-static void
-setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
-                struct efi_pixel_bitmask pixel_info, int pixel_format)
-{
-       if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
-               si->lfb_depth = 32;
-               si->lfb_linelength = pixels_per_scan_line * 4;
-               si->red_size = 8;
-               si->red_pos = 0;
-               si->green_size = 8;
-               si->green_pos = 8;
-               si->blue_size = 8;
-               si->blue_pos = 16;
-               si->rsvd_size = 8;
-               si->rsvd_pos = 24;
-       } else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
-               si->lfb_depth = 32;
-               si->lfb_linelength = pixels_per_scan_line * 4;
-               si->red_size = 8;
-               si->red_pos = 16;
-               si->green_size = 8;
-               si->green_pos = 8;
-               si->blue_size = 8;
-               si->blue_pos = 0;
-               si->rsvd_size = 8;
-               si->rsvd_pos = 24;
-       } else if (pixel_format == PIXEL_BIT_MASK) {
-               find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
-               find_bits(pixel_info.green_mask, &si->green_pos,
-                         &si->green_size);
-               find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
-               find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
-                         &si->rsvd_size);
-               si->lfb_depth = si->red_size + si->green_size +
-                       si->blue_size + si->rsvd_size;
-               si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
-       } else {
-               si->lfb_depth = 4;
-               si->lfb_linelength = si->lfb_width / 2;
-               si->red_size = 0;
-               si->red_pos = 0;
-               si->green_size = 0;
-               si->green_pos = 0;
-               si->blue_size = 0;
-               si->blue_pos = 0;
-               si->rsvd_size = 0;
-               si->rsvd_pos = 0;
-       }
-}
-
-static efi_status_t
-__gop_query32(struct efi_graphics_output_protocol_32 *gop32,
-             struct efi_graphics_output_mode_info **info,
-             unsigned long *size, u64 *fb_base)
-{
-       struct efi_graphics_output_protocol_mode_32 *mode;
-       efi_status_t status;
-       unsigned long m;
-
-       m = gop32->mode;
-       mode = (struct efi_graphics_output_protocol_mode_32 *)m;
-
-       status = efi_early->call(gop32->query_mode, gop32,
-                                mode->mode, size, info);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       *fb_base = mode->frame_buffer_base;
-       return status;
-}
-
-static efi_status_t
-setup_gop32(struct screen_info *si, efi_guid_t *proto,
-           unsigned long size, void **gop_handle)
-{
-       struct efi_graphics_output_protocol_32 *gop32, *first_gop;
-       unsigned long nr_gops;
-       u16 width, height;
-       u32 pixels_per_scan_line;
-       u32 ext_lfb_base;
-       u64 fb_base;
-       struct efi_pixel_bitmask pixel_info;
-       int pixel_format;
-       efi_status_t status;
-       u32 *handles = (u32 *)(unsigned long)gop_handle;
-       int i;
-
-       first_gop = NULL;
-       gop32 = NULL;
-
-       nr_gops = size / sizeof(u32);
-       for (i = 0; i < nr_gops; i++) {
-               struct efi_graphics_output_mode_info *info = NULL;
-               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
-               bool conout_found = false;
-               void *dummy = NULL;
-               u32 h = handles[i];
-               u64 current_fb_base;
-
-               status = efi_call_early(handle_protocol, h,
-                                       proto, (void **)&gop32);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               status = efi_call_early(handle_protocol, h,
-                                       &conout_proto, &dummy);
-               if (status == EFI_SUCCESS)
-                       conout_found = true;
-
-               status = __gop_query32(gop32, &info, &size, &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
-                       /*
-                        * Systems that use the UEFI Console Splitter may
-                        * provide multiple GOP devices, not all of which are
-                        * backed by real hardware. The workaround is to search
-                        * for a GOP implementing the ConOut protocol, and if
-                        * one isn't found, to just fall back to the first GOP.
-                        */
-                       width = info->horizontal_resolution;
-                       height = info->vertical_resolution;
-                       pixel_format = info->pixel_format;
-                       pixel_info = info->pixel_information;
-                       pixels_per_scan_line = info->pixels_per_scan_line;
-                       fb_base = current_fb_base;
-
-                       /*
-                        * Once we've found a GOP supporting ConOut,
-                        * don't bother looking any further.
-                        */
-                       first_gop = gop32;
-                       if (conout_found)
-                               break;
-               }
-       }
-
-       /* Did we find any GOPs? */
-       if (!first_gop)
-               goto out;
-
-       /* EFI framebuffer */
-       si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
-       si->lfb_width = width;
-       si->lfb_height = height;
-       si->lfb_base = fb_base;
-
-       ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
-       if (ext_lfb_base) {
-               si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-               si->ext_lfb_base = ext_lfb_base;
-       }
-
-       si->pages = 1;
-
-       setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
-       si->lfb_size = si->lfb_linelength * si->lfb_height;
-
-       si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-       return status;
-}
-
-static efi_status_t
-__gop_query64(struct efi_graphics_output_protocol_64 *gop64,
-             struct efi_graphics_output_mode_info **info,
-             unsigned long *size, u64 *fb_base)
-{
-       struct efi_graphics_output_protocol_mode_64 *mode;
-       efi_status_t status;
-       unsigned long m;
-
-       m = gop64->mode;
-       mode = (struct efi_graphics_output_protocol_mode_64 *)m;
-
-       status = efi_early->call(gop64->query_mode, gop64,
-                                mode->mode, size, info);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       *fb_base = mode->frame_buffer_base;
-       return status;
-}
-
-static efi_status_t
-setup_gop64(struct screen_info *si, efi_guid_t *proto,
-           unsigned long size, void **gop_handle)
-{
-       struct efi_graphics_output_protocol_64 *gop64, *first_gop;
-       unsigned long nr_gops;
-       u16 width, height;
-       u32 pixels_per_scan_line;
-       u32 ext_lfb_base;
-       u64 fb_base;
-       struct efi_pixel_bitmask pixel_info;
-       int pixel_format;
-       efi_status_t status;
-       u64 *handles = (u64 *)(unsigned long)gop_handle;
-       int i;
-
-       first_gop = NULL;
-       gop64 = NULL;
-
-       nr_gops = size / sizeof(u64);
-       for (i = 0; i < nr_gops; i++) {
-               struct efi_graphics_output_mode_info *info = NULL;
-               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
-               bool conout_found = false;
-               void *dummy = NULL;
-               u64 h = handles[i];
-               u64 current_fb_base;
-
-               status = efi_call_early(handle_protocol, h,
-                                       proto, (void **)&gop64);
-               if (status != EFI_SUCCESS)
-                       continue;
-
-               status = efi_call_early(handle_protocol, h,
-                                       &conout_proto, &dummy);
-               if (status == EFI_SUCCESS)
-                       conout_found = true;
-
-               status = __gop_query64(gop64, &info, &size, &current_fb_base);
-               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
-                       /*
-                        * Systems that use the UEFI Console Splitter may
-                        * provide multiple GOP devices, not all of which are
-                        * backed by real hardware. The workaround is to search
-                        * for a GOP implementing the ConOut protocol, and if
-                        * one isn't found, to just fall back to the first GOP.
-                        */
-                       width = info->horizontal_resolution;
-                       height = info->vertical_resolution;
-                       pixel_format = info->pixel_format;
-                       pixel_info = info->pixel_information;
-                       pixels_per_scan_line = info->pixels_per_scan_line;
-                       fb_base = current_fb_base;
-
-                       /*
-                        * Once we've found a GOP supporting ConOut,
-                        * don't bother looking any further.
-                        */
-                       first_gop = gop64;
-                       if (conout_found)
-                               break;
-               }
-       }
-
-       /* Did we find any GOPs? */
-       if (!first_gop)
-               goto out;
-
-       /* EFI framebuffer */
-       si->orig_video_isVGA = VIDEO_TYPE_EFI;
-
-       si->lfb_width = width;
-       si->lfb_height = height;
-       si->lfb_base = fb_base;
-
-       ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
-       if (ext_lfb_base) {
-               si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
-               si->ext_lfb_base = ext_lfb_base;
-       }
-
-       si->pages = 1;
-
-       setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
-
-       si->lfb_size = si->lfb_linelength * si->lfb_height;
-
-       si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
-out:
-       return status;
-}
-
-/*
- * See if we have Graphics Output Protocol
- */
-static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto,
-                             unsigned long size)
-{
-       efi_status_t status;
-       void **gop_handle = NULL;
-
-       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
-                               size, (void **)&gop_handle);
-       if (status != EFI_SUCCESS)
-               return status;
-
-       status = efi_call_early(locate_handle,
-                               EFI_LOCATE_BY_PROTOCOL,
-                               proto, NULL, &size, gop_handle);
-       if (status != EFI_SUCCESS)
-               goto free_handle;
-
-       if (efi_early->is64)
-               status = setup_gop64(si, proto, size, gop_handle);
-       else
-               status = setup_gop32(si, proto, size, gop_handle);
-
-free_handle:
-       efi_call_early(free_pool, gop_handle);
-       return status;
-}
-
 static efi_status_t
 setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height)
 {
@@ -1038,7 +732,7 @@ void setup_graphics(struct boot_params *boot_params)
                                EFI_LOCATE_BY_PROTOCOL,
                                &graphics_proto, NULL, &size, gop_handle);
        if (status == EFI_BUFFER_TOO_SMALL)
-               status = setup_gop(si, &graphics_proto, size);
+               status = efi_setup_gop(NULL, si, &graphics_proto, size);
 
        if (status != EFI_SUCCESS) {
                size = 0;
index d487e727f1ec7347ea960b038428998d43a1d6bb..c0223f1a89d71229021ed2d15f99c50ac8c82166 100644 (file)
 
 #define DESC_TYPE_CODE_DATA    (1 << 0)
 
-#define EFI_CONSOLE_OUT_DEVICE_GUID    \
-       EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, \
-                 0x3f, 0xc1, 0x4d)
-
-#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR              0
-#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR              1
-#define PIXEL_BIT_MASK                                 2
-#define PIXEL_BLT_ONLY                                 3
-#define PIXEL_FORMAT_MAX                               4
-
-struct efi_pixel_bitmask {
-       u32 red_mask;
-       u32 green_mask;
-       u32 blue_mask;
-       u32 reserved_mask;
-};
-
-struct efi_graphics_output_mode_info {
-       u32 version;
-       u32 horizontal_resolution;
-       u32 vertical_resolution;
-       int pixel_format;
-       struct efi_pixel_bitmask pixel_information;
-       u32 pixels_per_scan_line;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_32 {
-       u32 max_mode;
-       u32 mode;
-       u32 info;
-       u32 size_of_info;
-       u64 frame_buffer_base;
-       u32 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_64 {
-       u32 max_mode;
-       u32 mode;
-       u64 info;
-       u64 size_of_info;
-       u64 frame_buffer_base;
-       u64 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode {
-       u32 max_mode;
-       u32 mode;
-       unsigned long info;
-       unsigned long size_of_info;
-       u64 frame_buffer_base;
-       unsigned long frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_32 {
-       u32 query_mode;
-       u32 set_mode;
-       u32 blt;
-       u32 mode;
-};
-
-struct efi_graphics_output_protocol_64 {
-       u64 query_mode;
-       u64 set_mode;
-       u64 blt;
-       u64 mode;
-};
-
-struct efi_graphics_output_protocol {
-       void *query_mode;
-       unsigned long set_mode;
-       unsigned long blt;
-       struct efi_graphics_output_protocol_mode *mode;
-};
-
 struct efi_uga_draw_protocol_32 {
        u32 get_mode;
        u32 set_mode;
index f9affcc3b9f134939c606c880be807785fd7a23e..9906505c998aac2e0c5b07736acc0798555ad3cd 100644 (file)
@@ -26,3 +26,6 @@ CONFIG_VIRTIO_NET=y
 CONFIG_9P_FS=y
 CONFIG_NET_9P=y
 CONFIG_NET_9P_VIRTIO=y
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_VIRTIO_INPUT=y
index b30dd8154cc244080c395d8913b3ab4fed1cadb3..4cddd17153fbe569ed1a1e9e0a9c17e39ba05be3 100644 (file)
 375    i386    membarrier              sys_membarrier
 376    i386    mlock2                  sys_mlock2
 377    i386    copy_file_range         sys_copy_file_range
-378    i386    preadv2                 sys_preadv2
-379    i386    pwritev2                sys_pwritev2
+378    i386    preadv2                 sys_preadv2                     compat_sys_preadv2
+379    i386    pwritev2                sys_pwritev2                    compat_sys_pwritev2
index 40625ca7a190953fc87c386a03017bab96903435..6011a573dd64995f0818667c1bb426518d689b78 100644 (file)
@@ -474,6 +474,7 @@ static __init int _init_perf_amd_iommu(
 
 static struct perf_amd_iommu __perf_iommu = {
        .pmu = {
+               .task_ctx_nr    = perf_invalid_context,
                .event_init     = perf_iommu_event_init,
                .add            = perf_iommu_add,
                .del            = perf_iommu_del,
index aff79884e17d2818531b978df08726c882cc9fed..5210eaa4aa629daa19424456d759b760d7d0a43a 100644 (file)
@@ -3637,6 +3637,8 @@ __init int intel_pmu_init(void)
                pr_cont("Knights Landing events, ");
                break;
 
+       case 142: /* 14nm Kabylake Mobile */
+       case 158: /* 14nm Kabylake Desktop */
        case 78: /* 14nm Skylake Mobile */
        case 94: /* 14nm Skylake Desktop */
        case 85: /* 14nm Skylake Server */
@@ -3706,7 +3708,7 @@ __init int intel_pmu_init(void)
                                c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
                        }
                        c->idxmsk64 &=
-                               ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+                               ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
                        c->weight = hweight64(c->idxmsk64);
                }
        }
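
The ~0UL -> ~0ULL change above matters on 32-bit builds: INTEL_PMC_IDX_FIXED is 32, so the shift count reaches or exceeds the width of a 32-bit unsigned long, which is undefined (and on x86 effectively taken modulo 32), leaving the constraint mask without the fixed-counter bits. A quick illustration of the intended 64-bit mask (the "3 fixed counters" value is just an example):

#include <stdio.h>

int main(void)
{
        unsigned int shift = 32 + 3;            /* INTEL_PMC_IDX_FIXED + num_counters_fixed */
        unsigned long long idxmsk = ~0ULL;

        idxmsk &= ~(~0ULL << shift);            /* keep counter bits 0..34 */
        printf("0x%016llx\n", idxmsk);          /* 0x00000007ffffffff */
        return 0;
}
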
index 09a77dbc73c93110a40d2afbf2dedc86e50ea44f..7377814de30b6aaba0478bc91bf9156b5be6322b 100644 (file)
@@ -709,6 +709,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
 
        /* clear STOP and INT from current entry */
        buf->topa_index[buf->stop_pos]->stop = 0;
+       buf->topa_index[buf->stop_pos]->intr = 0;
        buf->topa_index[buf->intr_pos]->intr = 0;
 
        /* how many pages till the STOP marker */
@@ -733,6 +734,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
        buf->intr_pos = idx;
 
        buf->topa_index[buf->stop_pos]->stop = 1;
+       buf->topa_index[buf->stop_pos]->intr = 1;
        buf->topa_index[buf->intr_pos]->intr = 1;
 
        return 0;
index ab2bcaaebe38d464ab7863c901ac41fc7b847711..b2625867ebd17543401cf6dbc5bf9e76fcd6ee6c 100644 (file)
 #define KNL_CHA_MSR_PMON_BOX_FILTER_TID                0x1ff
 #define KNL_CHA_MSR_PMON_BOX_FILTER_STATE      (7 << 18)
 #define KNL_CHA_MSR_PMON_BOX_FILTER_OP         (0xfffffe2aULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE        (0x1ULL << 32)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE (0x1ULL << 33)
+#define KNL_CHA_MSR_PMON_BOX_FILTER_NNC                (0x1ULL << 37)
 
 /* KNL EDC/MC UCLK */
 #define KNL_UCLK_MSR_PMON_CTR0_LOW             0x400
@@ -1902,6 +1905,10 @@ static int knl_cha_hw_config(struct intel_uncore_box *box,
                reg1->reg = HSWEP_C0_MSR_PMON_BOX_FILTER0 +
                            KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
                reg1->config = event->attr.config1 & knl_cha_filter_mask(idx);
+
+               reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_REMOTE_NODE;
+               reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_LOCAL_NODE;
+               reg1->config |= KNL_CHA_MSR_PMON_BOX_FILTER_NNC;
                reg1->idx = idx;
        }
        return 0;
index ec863b9a9f780c7507634353d64f9c2f76f1a0e1..8bef19f098d4d065a48a5d3e4ecc36b8158fc0ac 100644 (file)
@@ -166,7 +166,7 @@ again:
        if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
                delta = sign_extend64(delta, 31);
 
-       local64_add(now - prev, &event->count);
+       local64_add(delta, &event->count);
 }
 
 static void msr_event_start(struct perf_event *event, int flags)
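
The fix above makes the update add the (possibly sign-extended) delta rather than the raw "now - prev". For the 32-bit SMI counter that sign extension is what turns a wrapped reading back into a small positive increment. A small sketch, with a sign_extend64() modelled on the kernel helper:

#include <stdio.h>
#include <stdint.h>

static int64_t sign_extend64(uint64_t value, int index) /* modelled on the kernel helper */
{
        int shift = 63 - index;
        return (int64_t)(value << shift) >> shift;
}

int main(void)
{
        uint64_t prev = 0xfffffffeULL, now = 0x3;   /* 32-bit counter wrapped: +5 events */
        uint64_t raw = now - prev;                  /* 0xffffffff00000005: bogus if added as-is */
        int64_t delta = sign_extend64(raw, 31);     /* 5 */

        printf("raw=0x%016llx delta=%lld\n",
               (unsigned long long)raw, (long long)delta);
        return 0;
}
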
index 53748c45e4885f574531f8cd6cf143f6dbc264d6..78d1e7467eae9fb9bd200cb04205c22322b01b4f 100644 (file)
@@ -3,6 +3,7 @@
 
 #include <asm/fpu/api.h>
 #include <asm/pgtable.h>
+#include <asm/processor-flags.h>
 #include <asm/tlb.h>
 
 /*
 
 #define MAX_CMDLINE_ADDRESS    UINT_MAX
 
-#ifdef CONFIG_X86_32
+#define ARCH_EFI_IRQ_FLAGS_MASK        X86_EFLAGS_IF
 
+#ifdef CONFIG_X86_32
 
 extern unsigned long asmlinkage efi_call_phys(void *, ...);
 
+#define arch_efi_call_virt_setup()     kernel_fpu_begin()
+#define arch_efi_call_virt_teardown()  kernel_fpu_end()
+
 /*
  * Wrap all the virtual calls in a way that forces the parameters on the stack.
  */
-
-/* Use this macro if your virtual returns a non-void value */
-#define efi_call_virt(f, args...) \
+#define arch_efi_call_virt(f, args...)                                 \
 ({                                                                     \
-       efi_status_t __s;                                               \
-       kernel_fpu_begin();                                             \
-       __s = ((efi_##f##_t __attribute__((regparm(0)))*)               \
-               efi.systab->runtime->f)(args);                          \
-       kernel_fpu_end();                                               \
-       __s;                                                            \
-})
-
-/* Use this macro if your virtual call does not return any value */
-#define __efi_call_virt(f, args...) \
-({                                                                     \
-       kernel_fpu_begin();                                             \
        ((efi_##f##_t __attribute__((regparm(0)))*)                     \
                efi.systab->runtime->f)(args);                          \
-       kernel_fpu_end();                                               \
 })
 
 #define efi_ioremap(addr, size, type, attr)    ioremap_cache(addr, size)
@@ -78,10 +68,8 @@ struct efi_scratch {
        u64     phys_stack;
 } __packed;
 
-#define efi_call_virt(f, ...)                                          \
+#define arch_efi_call_virt_setup()                                     \
 ({                                                                     \
-       efi_status_t __s;                                               \
-                                                                       \
        efi_sync_low_kernel_mappings();                                 \
        preempt_disable();                                              \
        __kernel_fpu_begin();                                           \
@@ -91,9 +79,13 @@ struct efi_scratch {
                write_cr3((unsigned long)efi_scratch.efi_pgt);          \
                __flush_tlb_all();                                      \
        }                                                               \
-                                                                       \
-       __s = efi_call((void *)efi.systab->runtime->f, __VA_ARGS__);    \
-                                                                       \
+})
+
+#define arch_efi_call_virt(f, args...)                                 \
+       efi_call((void *)efi.systab->runtime->f, args)                  \
+
+#define arch_efi_call_virt_teardown()                                  \
+({                                                                     \
        if (efi_scratch.use_pgd) {                                      \
                write_cr3(efi_scratch.prev_cr3);                        \
                __flush_tlb_all();                                      \
@@ -101,15 +93,8 @@ struct efi_scratch {
                                                                        \
        __kernel_fpu_end();                                             \
        preempt_enable();                                               \
-       __s;                                                            \
 })
 
-/*
- * All X86_64 virt calls return non-void values. Thus, use non-void call for
- * virt calls that would be void on X86_32.
- */
-#define __efi_call_virt(f, args...) efi_call_virt(f, args)
-
 extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
                                        u32 type, u64 attribute);
 
@@ -180,6 +165,8 @@ static inline bool efi_runtime_supported(void)
 extern struct console early_efi_console;
 extern void parse_efi_setup(u64 phys_addr, u32 data_len);
 
+extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
+
 #ifdef CONFIG_EFI_MIXED
 extern void efi_thunk_runtime_setup(void);
 extern efi_status_t efi_thunk_set_virtual_address_map(
@@ -225,6 +212,11 @@ __pure const struct efi_config *__efi_early(void);
 #define efi_call_early(f, ...)                                         \
        __efi_early()->call(__efi_early()->f, __VA_ARGS__);
 
+#define __efi_call_early(f, ...)                                       \
+       __efi_early()->call((unsigned long)f, __VA_ARGS__);
+
+#define efi_is_64bit()         __efi_early()->is64
+
 extern bool efi_reboot_required(void);
 
 #else
index a969ae607be8323578865285b27cb443451b4483..2e7513d1f1f45eb3bb7ec4ebe459aec2fa76b46d 100644 (file)
@@ -108,6 +108,14 @@ struct exception_table_entry {
 
 #define ARCH_HAS_RELATIVE_EXTABLE
 
+#define swap_ex_entry_fixup(a, b, tmp, delta)                  \
+       do {                                                    \
+               (a)->fixup = (b)->fixup + (delta);              \
+               (b)->fixup = (tmp).fixup - (delta);             \
+               (a)->handler = (b)->handler + (delta);          \
+               (b)->handler = (tmp).handler - (delta);         \
+       } while (0)
+
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern int early_fixup_exception(unsigned long *ip);
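
The macro above exists because these exception-table entries store self-relative fields: each field holds "target - &field", so when the sort routine swaps two entries, the stored offsets have to be adjusted by the distance the entries moved or they would resolve to the wrong targets; the extra handler field (beyond insn/fixup) is presumably why the arch override is needed here. A self-contained sketch of the same bookkeeping for a single relative field (plain C, 64-bit offset instead of the kernel's 32-bit one):

#include <stdio.h>
#include <stdint.h>

struct rel_entry { int64_t fixup; };    /* stores: target - &fixup */

static void set_target(struct rel_entry *e, void *target)
{
        e->fixup = (char *)target - (char *)&e->fixup;
}

static void *resolve(const struct rel_entry *e)
{
        return (char *)&e->fixup + e->fixup;
}

int main(void)
{
        static char t0, t1;                     /* two fixup targets */
        static struct rel_entry ent[2];
        struct rel_entry tmp;
        int64_t delta = (char *)&ent[1] - (char *)&ent[0];

        set_target(&ent[0], &t0);
        set_target(&ent[1], &t1);

        /* swap the two entries, adjusting the relative offsets as the macro does */
        tmp = ent[0];
        ent[0].fixup = ent[1].fixup + delta;
        ent[1].fixup = tmp.fixup - delta;

        printf("%d %d\n", resolve(&ent[0]) == &t1, resolve(&ent[1]) == &t0);  /* 1 1 */
        return 0;
}
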
index 8f4942e2bcbb21584a78225b1208adefe86e0aa9..d7ce96a7dacaedc630a0e9a5ef5b761edbd0ffc7 100644 (file)
@@ -891,9 +891,7 @@ void __init uv_system_init(void)
        }
        pr_info("UV: Found %s hub\n", hub);
 
-       /* We now only need to map the MMRs on UV1 */
-       if (is_uv1_hub())
-               map_low_mmrs();
+       map_low_mmrs();
 
        m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
        m_val = m_n_config.s.m_skt;
index 1f7fdb91a818bc10d4b975a070d6b2c1a947b15b..e4393bfc7f0d9bb39520adceeae2968652779a5c 100644 (file)
@@ -336,7 +336,7 @@ static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
        unsigned int eax, ebx, ecx, edx;
 
-       if (c->cpuid_level < 4)
+       if (!IS_ENABLED(CONFIG_SMP) || c->cpuid_level < 4)
                return 1;
 
        /* Intel has a non-standard dependency on %ecx for this CPUID level. */
index ab0adc0fa5db4da281de34e46c71e2cfbb707c07..a9b31eb815f23e93eae56960879448d7a63fba09 100644 (file)
@@ -535,6 +535,15 @@ static void native_machine_emergency_restart(void)
        mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
        *((unsigned short *)__va(0x472)) = mode;
 
+       /*
+        * If an EFI capsule has been registered with the firmware then
+        * override the reboot= parameter.
+        */
+       if (efi_capsule_pending(NULL)) {
+               pr_info("EFI capsule is pending, forcing EFI reboot.\n");
+               reboot_type = BOOT_EFI;
+       }
+
        for (;;) {
                /* Could also try the reset bit in the Hammer NB */
                switch (reboot_type) {
index 548ddf7d6fd2085676328ffcde6375ce3e88b477..3e84ef16f65799459cd55e0c1bce50ae43a9195e 100644 (file)
@@ -248,18 +248,17 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
        if (config_enabled(CONFIG_X86_64))
                sp -= 128;
 
-       if (!onsigstack) {
-               /* This is the X/Open sanctioned signal stack switching.  */
-               if (ka->sa.sa_flags & SA_ONSTACK) {
-                       if (current->sas_ss_size)
-                               sp = current->sas_ss_sp + current->sas_ss_size;
-               } else if (config_enabled(CONFIG_X86_32) &&
-                          (regs->ss & 0xffff) != __USER_DS &&
-                          !(ka->sa.sa_flags & SA_RESTORER) &&
-                          ka->sa.sa_restorer) {
-                               /* This is the legacy signal stack switching. */
-                               sp = (unsigned long) ka->sa.sa_restorer;
-               }
+       /* This is the X/Open sanctioned signal stack switching.  */
+       if (ka->sa.sa_flags & SA_ONSTACK) {
+               if (sas_ss_flags(sp) == 0)
+                       sp = current->sas_ss_sp + current->sas_ss_size;
+       } else if (config_enabled(CONFIG_X86_32) &&
+                  !onsigstack &&
+                  (regs->ss & 0xffff) != __USER_DS &&
+                  !(ka->sa.sa_flags & SA_RESTORER) &&
+                  ka->sa.sa_restorer) {
+               /* This is the legacy signal stack switching. */
+               sp = (unsigned long) ka->sa.sa_restorer;
        }
 
        if (fpu->fpstate_active) {
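
The rework above keys the SA_ONSTACK switch off sas_ss_flags(sp) instead of the earlier onsigstack test: sas_ss_flags() reports SS_DISABLE when no alternate stack is configured and SS_ONSTACK when sp already lies inside it, so the frame moves to the top of the altstack only in the remaining case. A condensed model of that decision (hypothetical helper; SS_* semantics as in the generic signal code):

#include <stdio.h>

#define SS_ONSTACK      1
#define SS_DISABLE      2

static unsigned long pick_sigframe_sp(unsigned long sp, int sa_onstack,
                                      unsigned long ss_sp, unsigned long ss_size)
{
        int ss_flags;

        if (!ss_size)
                ss_flags = SS_DISABLE;                  /* no altstack configured */
        else if (sp > ss_sp && sp - ss_sp <= ss_size)
                ss_flags = SS_ONSTACK;                  /* already on the altstack */
        else
                ss_flags = 0;

        if (sa_onstack && ss_flags == 0)
                return ss_sp + ss_size;                 /* switch to top of the altstack */
        return sp;
}

int main(void)
{
        /* altstack at [0x1000, 0x3000); handler registered with SA_ONSTACK */
        printf("%#lx\n", pick_sigframe_sp(0x7000, 1, 0x1000, 0x2000)); /* 0x3000: switch */
        printf("%#lx\n", pick_sigframe_sp(0x1800, 1, 0x1000, 0x2000)); /* 0x1800: already on it */
        return 0;
}
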
index a2065d3b3b396f4503f4e4f42acc2af2bd4b307b..0e4329ed91ef61da5a86e5d604d1ad96109efbfb 100644 (file)
@@ -332,6 +332,11 @@ static void __init smp_init_package_map(void)
         * primary cores.
         */
        ncpus = boot_cpu_data.x86_max_cores;
+       if (!ncpus) {
+               pr_warn("x86_max_cores == zero !?!?");
+               ncpus = 1;
+       }
+
        __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
 
        /*
index b285d4e8c68e33bf3d4f2b070fdbcbbce7036d9a..623965e86b65eda431b8e5fdbc2204c47bcb8b33 100644 (file)
@@ -68,6 +68,21 @@ struct efifb_dmi_info efifb_dmi_list[] = {
        [M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE }
 };
 
+void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+       int i;
+
+       for (i = 0; i < M_UNKNOWN; i++) {
+               if (efifb_dmi_list[i].base != 0 &&
+                   !strcmp(opt, efifb_dmi_list[i].optname)) {
+                       si->lfb_base = efifb_dmi_list[i].base;
+                       si->lfb_linelength = efifb_dmi_list[i].stride;
+                       si->lfb_width = efifb_dmi_list[i].width;
+                       si->lfb_height = efifb_dmi_list[i].height;
+               }
+       }
+}
+
 #define choose_value(dmivalue, fwvalue, field, flags) ({       \
                typeof(fwvalue) _ret_ = fwvalue;                \
                if ((flags) & (field))                          \
@@ -106,14 +121,24 @@ static int __init efifb_set_system(const struct dmi_system_id *id)
                                        continue;
                                for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                                        resource_size_t start, end;
+                                       unsigned long flags;
+
+                                       flags = pci_resource_flags(dev, i);
+                                       if (!(flags & IORESOURCE_MEM))
+                                               continue;
+
+                                       if (flags & IORESOURCE_UNSET)
+                                               continue;
+
+                                       if (pci_resource_len(dev, i) == 0)
+                                               continue;
 
                                        start = pci_resource_start(dev, i);
-                                       if (start == 0)
-                                               break;
                                        end = pci_resource_end(dev, i);
                                        if (screen_info.lfb_base >= start &&
                                            screen_info.lfb_base < end) {
                                                found_bar = 1;
+                                               break;
                                        }
                                }
                        }
index 92ae6acac8a7fbcb9b91cb386b3c23015c5377ea..6aa0f4d9eea6816bbf0c78514e0b76d8f4dc7def 100644 (file)
@@ -92,7 +92,7 @@ unsigned long try_msr_calibrate_tsc(void)
 
        if (freq_desc_tables[cpu_index].msr_plat) {
                rdmsr(MSR_PLATFORM_INFO, lo, hi);
-               ratio = (lo >> 8) & 0x1f;
+               ratio = (lo >> 8) & 0xff;
        } else {
                rdmsr(MSR_IA32_PERF_STATUS, lo, hi);
                ratio = (hi >> 8) & 0x1f;
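
A worked example of the MSR_PLATFORM_INFO fix above, which widens the ratio mask from 5 to 8 bits (the max non-turbo ratio sits in bits 15:8). The value 42 is an assumed ratio, purely for illustration:

        u32 lo = 0x00002a00;                  /* assumed MSR_PLATFORM_INFO low word, ratio 42 */
        u32 truncated = (lo >> 8) & 0x1f;     /* 0x0a == 10: old 5-bit mask loses the top bits */
        u32 full      = (lo >> 8) & 0xff;     /* 0x2a == 42: correct 8-bit ratio */
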
index 0f6294376fbdcaa2863956822583d09108cecada..a2f24af3c999ca5f14c553bf5e9875725c47668a 100644 (file)
@@ -5110,13 +5110,17 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
 
 static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
 {
+       register void *__sp asm(_ASM_SP);
        ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
+
        if (!(ctxt->d & ByteOp))
                fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
+
        asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
            : "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
-             [fastop]"+S"(fop)
+             [fastop]"+S"(fop), "+r"(__sp)
            : "c"(ctxt->src2.val));
+
        ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
        if (!fop) /* exception is returned in fop variable */
                return emulate_de(ctxt);
index 1ff4dbb73fb7a7ee56c2c28e0d37a6a049f3adc9..b6f50e8b0a393675009a5dcaad7f30af315bc91d 100644 (file)
@@ -2823,7 +2823,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
         */
        if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
            level == PT_PAGE_TABLE_LEVEL &&
-           PageTransCompound(pfn_to_page(pfn)) &&
+           PageTransCompoundMap(pfn_to_page(pfn)) &&
            !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
                unsigned long mask;
                /*
@@ -4785,7 +4785,7 @@ restart:
                 */
                if (sp->role.direct &&
                        !kvm_is_reserved_pfn(pfn) &&
-                       PageTransCompound(pfn_to_page(pfn))) {
+                       PageTransCompoundMap(pfn_to_page(pfn))) {
                        drop_spte(kvm, sptep);
                        need_tlb_flush = 1;
                        goto restart;
index 01be9ec3bf792f65ac91cc57fe7d64e8e644998f..a1f0e1d0ddc2453a07f86c53ec9791cc9164c6ba 100644 (file)
@@ -1125,8 +1125,14 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
 static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
 {
-       if (cpa->pgd)
+       if (cpa->pgd) {
+               /*
+                * Right now, we only execute this code path when mapping
+                * the EFI virtual memory map regions, no other users
+                * provide a ->pgd value. This may change in the future.
+                */
                return populate_pgd(cpa, vaddr);
+       }
 
        /*
         * Ignore all non primary paths.
index a2433817c987833f525380d8425bb6e902873bbd..6a2f5691b1ab57261809617ca9a9b375adfe135e 100644 (file)
@@ -43,40 +43,40 @@ void __init efi_bgrt_init(void)
                return;
 
        if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
-               pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
+               pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n",
                       bgrt_tab->header.length, sizeof(*bgrt_tab));
                return;
        }
        if (bgrt_tab->version != 1) {
-               pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
+               pr_notice("Ignoring BGRT: invalid version %u (expected 1)\n",
                       bgrt_tab->version);
                return;
        }
        if (bgrt_tab->status & 0xfe) {
-               pr_err("Ignoring BGRT: reserved status bits are non-zero %u\n",
+               pr_notice("Ignoring BGRT: reserved status bits are non-zero %u\n",
                       bgrt_tab->status);
                return;
        }
        if (bgrt_tab->image_type != 0) {
-               pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
+               pr_notice("Ignoring BGRT: invalid image type %u (expected 0)\n",
                       bgrt_tab->image_type);
                return;
        }
        if (!bgrt_tab->image_address) {
-               pr_err("Ignoring BGRT: null image address\n");
+               pr_notice("Ignoring BGRT: null image address\n");
                return;
        }
 
        image = memremap(bgrt_tab->image_address, sizeof(bmp_header), MEMREMAP_WB);
        if (!image) {
-               pr_err("Ignoring BGRT: failed to map image header memory\n");
+               pr_notice("Ignoring BGRT: failed to map image header memory\n");
                return;
        }
 
        memcpy(&bmp_header, image, sizeof(bmp_header));
        memunmap(image);
        if (bmp_header.id != 0x4d42) {
-               pr_err("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
+               pr_notice("Ignoring BGRT: Incorrect BMP magic number 0x%x (expected 0x4d42)\n",
                        bmp_header.id);
                return;
        }
@@ -84,14 +84,14 @@ void __init efi_bgrt_init(void)
 
        bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
        if (!bgrt_image) {
-               pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
+               pr_notice("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
                       bgrt_image_size);
                return;
        }
 
        image = memremap(bgrt_tab->image_address, bmp_header.size, MEMREMAP_WB);
        if (!image) {
-               pr_err("Ignoring BGRT: failed to map image memory\n");
+               pr_notice("Ignoring BGRT: failed to map image memory\n");
                kfree(bgrt_image);
                bgrt_image = NULL;
                return;
index 994a7df84a7bc713aa4913f04fc7c85992840183..f93545e7dc54e7e2aa19bf494db55eb74080b739 100644 (file)
 #include <asm/rtc.h>
 #include <asm/uv/uv.h>
 
-#define EFI_DEBUG
-
-struct efi_memory_map memmap;
-
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
@@ -119,11 +115,10 @@ void efi_get_time(struct timespec *now)
 
 void __init efi_find_mirror(void)
 {
-       void *p;
+       efi_memory_desc_t *md;
        u64 mirror_size = 0, total_size = 0;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               efi_memory_desc_t *md = p;
+       for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 
@@ -146,10 +141,9 @@ void __init efi_find_mirror(void)
 
 static void __init do_add_efi_memmap(void)
 {
-       void *p;
+       efi_memory_desc_t *md;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               efi_memory_desc_t *md = p;
+       for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
                int e820_type;
@@ -209,47 +203,47 @@ int __init efi_memblock_x86_reserve_range(void)
 #else
        pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
 #endif
-       memmap.phys_map         = pmap;
-       memmap.nr_map           = e->efi_memmap_size /
+       efi.memmap.phys_map     = pmap;
+       efi.memmap.nr_map       = e->efi_memmap_size /
                                  e->efi_memdesc_size;
-       memmap.desc_size        = e->efi_memdesc_size;
-       memmap.desc_version     = e->efi_memdesc_version;
+       efi.memmap.desc_size    = e->efi_memdesc_size;
+       efi.memmap.desc_version = e->efi_memdesc_version;
 
-       memblock_reserve(pmap, memmap.nr_map * memmap.desc_size);
+       WARN(efi.memmap.desc_version != 1,
+            "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+            efi.memmap.desc_version);
 
-       efi.memmap = &memmap;
+       memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
 
        return 0;
 }
 
 void __init efi_print_memmap(void)
 {
-#ifdef EFI_DEBUG
        efi_memory_desc_t *md;
-       void *p;
-       int i;
+       int i = 0;
 
-       for (p = memmap.map, i = 0;
-            p < memmap.map_end;
-            p += memmap.desc_size, i++) {
+       for_each_efi_memory_desc(md) {
                char buf[64];
 
-               md = p;
                pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
-                       i, efi_md_typeattr_format(buf, sizeof(buf), md),
+                       i++, efi_md_typeattr_format(buf, sizeof(buf), md),
                        md->phys_addr,
                        md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
                        (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
        }
-#endif  /*  EFI_DEBUG  */
 }
 
 void __init efi_unmap_memmap(void)
 {
+       unsigned long size;
+
        clear_bit(EFI_MEMMAP, &efi.flags);
-       if (memmap.map) {
-               early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size);
-               memmap.map = NULL;
+
+       size = efi.memmap.nr_map * efi.memmap.desc_size;
+       if (efi.memmap.map) {
+               early_memunmap(efi.memmap.map, size);
+               efi.memmap.map = NULL;
        }
 }
 
@@ -352,8 +346,6 @@ static int __init efi_systab_init(void *phys)
                       efi.systab->hdr.revision >> 16,
                       efi.systab->hdr.revision & 0xffff);
 
-       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
-
        return 0;
 }
 
@@ -440,17 +432,22 @@ static int __init efi_runtime_init(void)
 
 static int __init efi_memmap_init(void)
 {
+       unsigned long addr, size;
+
        if (efi_enabled(EFI_PARAVIRT))
                return 0;
 
        /* Map the EFI memory map */
-       memmap.map = early_memremap((unsigned long)memmap.phys_map,
-                                  memmap.nr_map * memmap.desc_size);
-       if (memmap.map == NULL) {
+       size = efi.memmap.nr_map * efi.memmap.desc_size;
+       addr = (unsigned long)efi.memmap.phys_map;
+
+       efi.memmap.map = early_memremap(addr, size);
+       if (efi.memmap.map == NULL) {
                pr_err("Could not map the memory map!\n");
                return -ENOMEM;
        }
-       memmap.map_end = memmap.map + (memmap.nr_map * memmap.desc_size);
+
+       efi.memmap.map_end = efi.memmap.map + size;
 
        if (add_efi_memmap)
                do_add_efi_memmap();
@@ -552,12 +549,9 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
 void __init runtime_code_page_mkexec(void)
 {
        efi_memory_desc_t *md;
-       void *p;
 
        /* Make EFI runtime service code area executable */
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
-
+       for_each_efi_memory_desc(md) {
                if (md->type != EFI_RUNTIME_SERVICES_CODE)
                        continue;
 
@@ -604,12 +598,10 @@ void __init old_map_region(efi_memory_desc_t *md)
 /* Merge contiguous regions of the same type and attribute */
 static void __init efi_merge_regions(void)
 {
-       void *p;
        efi_memory_desc_t *md, *prev_md = NULL;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+       for_each_efi_memory_desc(md) {
                u64 prev_size;
-               md = p;
 
                if (!prev_md) {
                        prev_md = md;
@@ -651,30 +643,31 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
 static void __init save_runtime_map(void)
 {
 #ifdef CONFIG_KEXEC_CORE
+       unsigned long desc_size;
        efi_memory_desc_t *md;
-       void *tmp, *p, *q = NULL;
+       void *tmp, *q = NULL;
        int count = 0;
 
        if (efi_enabled(EFI_OLD_MEMMAP))
                return;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
+       desc_size = efi.memmap.desc_size;
 
+       for_each_efi_memory_desc(md) {
                if (!(md->attribute & EFI_MEMORY_RUNTIME) ||
                    (md->type == EFI_BOOT_SERVICES_CODE) ||
                    (md->type == EFI_BOOT_SERVICES_DATA))
                        continue;
-               tmp = krealloc(q, (count + 1) * memmap.desc_size, GFP_KERNEL);
+               tmp = krealloc(q, (count + 1) * desc_size, GFP_KERNEL);
                if (!tmp)
                        goto out;
                q = tmp;
 
-               memcpy(q + count * memmap.desc_size, md, memmap.desc_size);
+               memcpy(q + count * desc_size, md, desc_size);
                count++;
        }
 
-       efi_runtime_map_setup(q, count, memmap.desc_size);
+       efi_runtime_map_setup(q, count, desc_size);
        return;
 
 out:
@@ -714,10 +707,10 @@ static inline void *efi_map_next_entry_reverse(void *entry)
 {
        /* Initial call */
        if (!entry)
-               return memmap.map_end - memmap.desc_size;
+               return efi.memmap.map_end - efi.memmap.desc_size;
 
-       entry -= memmap.desc_size;
-       if (entry < memmap.map)
+       entry -= efi.memmap.desc_size;
+       if (entry < efi.memmap.map)
                return NULL;
 
        return entry;
@@ -759,10 +752,10 @@ static void *efi_map_next_entry(void *entry)
 
        /* Initial call */
        if (!entry)
-               return memmap.map;
+               return efi.memmap.map;
 
-       entry += memmap.desc_size;
-       if (entry >= memmap.map_end)
+       entry += efi.memmap.desc_size;
+       if (entry >= efi.memmap.map_end)
                return NULL;
 
        return entry;
@@ -776,8 +769,11 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
 {
        void *p, *new_memmap = NULL;
        unsigned long left = 0;
+       unsigned long desc_size;
        efi_memory_desc_t *md;
 
+       desc_size = efi.memmap.desc_size;
+
        p = NULL;
        while ((p = efi_map_next_entry(p))) {
                md = p;
@@ -792,7 +788,7 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
                efi_map_region(md);
                get_systab_virt_addr(md);
 
-               if (left < memmap.desc_size) {
+               if (left < desc_size) {
                        new_memmap = realloc_pages(new_memmap, *pg_shift);
                        if (!new_memmap)
                                return NULL;
@@ -801,10 +797,9 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
                        (*pg_shift)++;
                }
 
-               memcpy(new_memmap + (*count * memmap.desc_size), md,
-                      memmap.desc_size);
+               memcpy(new_memmap + (*count * desc_size), md, desc_size);
 
-               left -= memmap.desc_size;
+               left -= desc_size;
                (*count)++;
        }
 
@@ -816,7 +811,6 @@ static void __init kexec_enter_virtual_mode(void)
 #ifdef CONFIG_KEXEC_CORE
        efi_memory_desc_t *md;
        unsigned int num_pages;
-       void *p;
 
        efi.systab = NULL;
 
@@ -840,8 +834,7 @@ static void __init kexec_enter_virtual_mode(void)
        * Map efi regions which were passed via setup_data. The virt_addr is a
        * fixed addr which was used in first kernel of a kexec boot.
        */
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
+       for_each_efi_memory_desc(md) {
                efi_map_region_fixed(md); /* FIXME: add error handling */
                get_systab_virt_addr(md);
        }
@@ -850,10 +843,10 @@ static void __init kexec_enter_virtual_mode(void)
 
        BUG_ON(!efi.systab);
 
-       num_pages = ALIGN(memmap.nr_map * memmap.desc_size, PAGE_SIZE);
+       num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
        num_pages >>= PAGE_SHIFT;
 
-       if (efi_setup_page_tables(memmap.phys_map, num_pages)) {
+       if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
                clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
                return;
        }
@@ -937,16 +930,16 @@ static void __init __efi_enter_virtual_mode(void)
 
        if (efi_is_native()) {
                status = phys_efi_set_virtual_address_map(
-                               memmap.desc_size * count,
-                               memmap.desc_size,
-                               memmap.desc_version,
+                               efi.memmap.desc_size * count,
+                               efi.memmap.desc_size,
+                               efi.memmap.desc_version,
                                (efi_memory_desc_t *)__pa(new_memmap));
        } else {
                status = efi_thunk_set_virtual_address_map(
                                efi_phys.set_virtual_address_map,
-                               memmap.desc_size * count,
-                               memmap.desc_size,
-                               memmap.desc_version,
+                               efi.memmap.desc_size * count,
+                               efi.memmap.desc_size,
+                               efi.memmap.desc_version,
                                (efi_memory_desc_t *)__pa(new_memmap));
        }
 
@@ -1011,13 +1004,11 @@ void __init efi_enter_virtual_mode(void)
 u32 efi_mem_type(unsigned long phys_addr)
 {
        efi_memory_desc_t *md;
-       void *p;
 
        if (!efi_enabled(EFI_MEMMAP))
                return 0;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
+       for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                                  (md->num_pages << EFI_PAGE_SHIFT))))
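
The hunks above replace every open-coded walk over the EFI memory map (stepping a void pointer by desc_size) with the for_each_efi_memory_desc() iterator operating on efi.memmap. A minimal sketch of the new pattern, illustrative only, assuming a kernel context with <linux/efi.h>; the helper function itself is hypothetical and not part of the patch:

        /* Count runtime-service regions with the iterator the patch switches to. */
        static unsigned int count_runtime_regions(void)
        {
                efi_memory_desc_t *md;
                unsigned int n = 0;

                for_each_efi_memory_desc(md) {          /* hides the desc_size stepping */
                        if (md->attribute & EFI_MEMORY_RUNTIME)
                                n++;
                }

                return n;
        }
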
index 49e4dd4a1f58257630c8b341bf8b12922200cd2e..6e7242be1c8744b89af4a96587fb79d3a387e77a 100644 (file)
@@ -55,14 +55,12 @@ struct efi_scratch efi_scratch;
 static void __init early_code_mapping_set_exec(int executable)
 {
        efi_memory_desc_t *md;
-       void *p;
 
        if (!(__supported_pte_mask & _PAGE_NX))
                return;
 
        /* Make EFI service code area executable */
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               md = p;
+       for_each_efi_memory_desc(md) {
                if (md->type == EFI_RUNTIME_SERVICES_CODE ||
                    md->type == EFI_BOOT_SERVICES_CODE)
                        efi_set_executable(md, executable);
@@ -253,7 +251,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
         * Map all of RAM so that we can access arguments in the 1:1
         * mapping when making EFI runtime calls.
         */
-       for_each_efi_memory_desc(&memmap, md) {
+       for_each_efi_memory_desc(md) {
                if (md->type != EFI_CONVENTIONAL_MEMORY &&
                    md->type != EFI_LOADER_DATA &&
                    md->type != EFI_LOADER_CODE)
@@ -398,7 +396,6 @@ void __init efi_runtime_update_mappings(void)
        unsigned long pfn;
        pgd_t *pgd = efi_pgd;
        efi_memory_desc_t *md;
-       void *p;
 
        if (efi_enabled(EFI_OLD_MEMMAP)) {
                if (__supported_pte_mask & _PAGE_NX)
@@ -409,9 +406,8 @@ void __init efi_runtime_update_mappings(void)
        if (!efi_enabled(EFI_NX_PE_DATA))
                return;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+       for_each_efi_memory_desc(md) {
                unsigned long pf = 0;
-               md = p;
 
                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;
index ab50ada1d56e4f87f6fff3207995ea32312fb37b..097cb09d917b1f826c13d6fb498115a2dd690d31 100644 (file)
@@ -195,10 +195,9 @@ static bool can_free_region(u64 start, u64 size)
 */
 void __init efi_reserve_boot_services(void)
 {
-       void *p;
+       efi_memory_desc_t *md;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               efi_memory_desc_t *md = p;
+       for_each_efi_memory_desc(md) {
                u64 start = md->phys_addr;
                u64 size = md->num_pages << EFI_PAGE_SHIFT;
                bool already_reserved;
@@ -250,10 +249,9 @@ void __init efi_reserve_boot_services(void)
 
 void __init efi_free_boot_services(void)
 {
-       void *p;
+       efi_memory_desc_t *md;
 
-       for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
-               efi_memory_desc_t *md = p;
+       for_each_efi_memory_desc(md) {
                unsigned long long start = md->phys_addr;
                unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
 
index a54f0543b956e5ccf5f20206e7983fe66ce2698d..b9f88b7751fbd87742b1d1439a1d89c97818f9ce 100644 (file)
@@ -9,24 +9,6 @@
 
 #include "blk.h"
 
-static bool iovec_gap_to_prv(struct request_queue *q,
-                            struct iovec *prv, struct iovec *cur)
-{
-       unsigned long prev_end;
-
-       if (!queue_virt_boundary(q))
-               return false;
-
-       if (prv->iov_base == NULL && prv->iov_len == 0)
-               /* prv is not set - don't check */
-               return false;
-
-       prev_end = (unsigned long)(prv->iov_base + prv->iov_len);
-
-       return (((unsigned long)cur->iov_base & queue_virt_boundary(q)) ||
-               prev_end & queue_virt_boundary(q));
-}
-
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
                      struct bio *bio)
 {
@@ -125,31 +107,18 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                        struct rq_map_data *map_data,
                        const struct iov_iter *iter, gfp_t gfp_mask)
 {
-       struct iovec iov, prv = {.iov_base = NULL, .iov_len = 0};
-       bool copy = (q->dma_pad_mask & iter->count) || map_data;
+       bool copy = false;
+       unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
        struct bio *bio = NULL;
        struct iov_iter i;
        int ret;
 
-       if (!iter || !iter->count)
-               return -EINVAL;
-
-       iov_for_each(iov, i, *iter) {
-               unsigned long uaddr = (unsigned long) iov.iov_base;
-
-               if (!iov.iov_len)
-                       return -EINVAL;
-
-               /*
-                * Keep going so we check length of all segments
-                */
-               if ((uaddr & queue_dma_alignment(q)) ||
-                   iovec_gap_to_prv(q, &prv, &iov))
-                       copy = true;
-
-               prv.iov_base = iov.iov_base;
-               prv.iov_len = iov.iov_len;
-       }
+       if (map_data)
+               copy = true;
+       else if (iov_iter_alignment(iter) & align)
+               copy = true;
+       else if (queue_virt_boundary(q))
+               copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
 
        i = *iter;
        do {
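
The rewritten blk_rq_map_user_iov() above decides up front whether the data must be bounced, instead of inspecting every iovec by hand. A sketch of that decision, illustrative only and kernel context assumed (<linux/blkdev.h>, <linux/uio.h>); the wrapper function is hypothetical, but it uses only helpers visible in the hunk:

        static bool needs_bounce_copy(struct request_queue *q,
                                      struct rq_map_data *map_data,
                                      const struct iov_iter *iter)
        {
                unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);

                if (map_data)                           /* caller supplies pages */
                        return true;
                if (iov_iter_alignment(iter) & align)   /* misaligned for DMA/padding */
                        return true;
                /* any inter-iovec gap that crosses the queue's virt boundary */
                return queue_virt_boundary(q) &&
                       (queue_virt_boundary(q) & iov_iter_gap_alignment(iter));
        }
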
index 93a1fdc1feee68c9a8b15bef682886015884ce98..1d33beb6a1ae5a9378905cca7b9fa7323a96ff2b 100644 (file)
@@ -96,6 +96,7 @@ config CRYPTO_AKCIPHER
 config CRYPTO_RSA
        tristate "RSA algorithm"
        select CRYPTO_AKCIPHER
+       select CRYPTO_MANAGER
        select MPILIB
        select ASN1
        help
index 5fc1f172963dc6914f0f6def8435943acd67dfe7..3887a98abcc3c255ccc37de87f11411b17522086 100644 (file)
@@ -69,8 +69,9 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
        struct scatterlist *sg;
 
        sg = walk->sg;
-       walk->pg = sg_page(sg);
        walk->offset = sg->offset;
+       walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+       walk->offset = offset_in_page(walk->offset);
        walk->entrylen = sg->length;
 
        if (walk->entrylen > walk->total)
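
The hash_walk_new_entry() fix above handles scatterlist entries whose offset exceeds a page by advancing the page pointer and keeping only the in-page remainder. A worked example, assuming 4 KiB pages and sg->offset == 5000:

        /* Illustrative arithmetic only (PAGE_SHIFT == 12 assumed):
         *   walk->pg     = sg_page(walk->sg) + (5000 >> PAGE_SHIFT)   ->  +1 page
         *   walk->offset = offset_in_page(5000)                       ->  5000 - 4096 = 904
         */
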
index b86883aedca11a617ad2e19bafe646bfecac583a..7d4acc4492338921dd1307c1cc4d4a252130b36a 100644 (file)
@@ -1776,6 +1776,7 @@ static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
 static int do_test_rsa(struct crypto_akcipher *tfm,
                       struct akcipher_testvec *vecs)
 {
+       char *xbuf[XBUFSIZE];
        struct akcipher_request *req;
        void *outbuf_enc = NULL;
        void *outbuf_dec = NULL;
@@ -1784,9 +1785,12 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
        int err = -ENOMEM;
        struct scatterlist src, dst, src_tab[2];
 
+       if (testmgr_alloc_buf(xbuf))
+               return err;
+
        req = akcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req)
-               return err;
+               goto free_xbuf;
 
        init_completion(&result.completion);
 
@@ -1804,9 +1808,14 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
        if (!outbuf_enc)
                goto free_req;
 
+       if (WARN_ON(vecs->m_size > PAGE_SIZE))
+               goto free_all;
+
+       memcpy(xbuf[0], vecs->m, vecs->m_size);
+
        sg_init_table(src_tab, 2);
-       sg_set_buf(&src_tab[0], vecs->m, 8);
-       sg_set_buf(&src_tab[1], vecs->m + 8, vecs->m_size - 8);
+       sg_set_buf(&src_tab[0], xbuf[0], 8);
+       sg_set_buf(&src_tab[1], xbuf[0] + 8, vecs->m_size - 8);
        sg_init_one(&dst, outbuf_enc, out_len_max);
        akcipher_request_set_crypt(req, src_tab, &dst, vecs->m_size,
                                   out_len_max);
@@ -1825,7 +1834,7 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
                goto free_all;
        }
        /* verify that encrypted message is equal to expected */
-       if (memcmp(vecs->c, sg_virt(req->dst), vecs->c_size)) {
+       if (memcmp(vecs->c, outbuf_enc, vecs->c_size)) {
                pr_err("alg: rsa: encrypt test failed. Invalid output\n");
                err = -EINVAL;
                goto free_all;
@@ -1840,7 +1849,13 @@ static int do_test_rsa(struct crypto_akcipher *tfm,
                err = -ENOMEM;
                goto free_all;
        }
-       sg_init_one(&src, vecs->c, vecs->c_size);
+
+       if (WARN_ON(vecs->c_size > PAGE_SIZE))
+               goto free_all;
+
+       memcpy(xbuf[0], vecs->c, vecs->c_size);
+
+       sg_init_one(&src, xbuf[0], vecs->c_size);
        sg_init_one(&dst, outbuf_dec, out_len_max);
        init_completion(&result.completion);
        akcipher_request_set_crypt(req, &src, &dst, vecs->c_size, out_len_max);
@@ -1867,6 +1882,8 @@ free_all:
        kfree(outbuf_enc);
 free_req:
        akcipher_request_free(req);
+free_xbuf:
+       testmgr_free_buf(xbuf);
        return err;
 }
 
index 1982310e6d83a3aa6a6e510e8f72682fb1c05226..da198b8641074cff2830687be53ffa79ba65df0c 100644 (file)
@@ -428,6 +428,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
                                obj_desc->method.mutex->mutex.
                                    original_sync_level =
                                    obj_desc->method.mutex->mutex.sync_level;
+
+                               obj_desc->method.mutex->mutex.thread_id =
+                                   acpi_os_get_thread_id();
                        }
                }
 
index d0f35e63640ba78b956dd6ed3b870a542fedaac5..63cc9dbe4f3b4d1db7037fb4ca1df532b8dc5534 100644 (file)
@@ -287,8 +287,11 @@ static int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc,
                                        offset);
                        rc = -ENXIO;
                }
-       } else
+       } else {
                rc = 0;
+               if (cmd_rc)
+                       *cmd_rc = xlat_status(buf, cmd);
+       }
 
  out:
        ACPI_FREE(out_obj);
index 5083f85efea76edb7f2968ffa967becbf4d4943a..cfa936a32513b1d70d74834fa743b6f612665dc1 100644 (file)
@@ -202,6 +202,14 @@ config SATA_FSL
 
          If unsure, say N.
 
+config SATA_AHCI_SEATTLE
+       tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
+       depends on ARCH_SEATTLE
+       help
+        This option enables support for the AMD Seattle SATA host controller.
+
+        If unsure, say N.
+
 config SATA_INIC162X
        tristate "Initio 162x SATA support (Very Experimental)"
        depends on PCI
index 18579521464e48e22001838ab4d0abf3efbc4b0a..0b2afb7e5f359979f7a54cdadb3178b174c885b0 100644 (file)
@@ -4,6 +4,7 @@ obj-$(CONFIG_ATA)               += libata.o
 # non-SFF interface
 obj-$(CONFIG_SATA_AHCI)                += ahci.o libahci.o
 obj-$(CONFIG_SATA_ACARD_AHCI)  += acard-ahci.o libahci.o
+obj-$(CONFIG_SATA_AHCI_SEATTLE)        += ahci_seattle.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_AHCI_PLATFORM) += ahci_platform.o libahci.o libahci_platform.o
 obj-$(CONFIG_SATA_FSL)         += sata_fsl.o
 obj-$(CONFIG_SATA_INIC162X)    += sata_inic162x.o
index 40442332bfa7c154b93b540eae3eb8ae94e6d9f3..62a04c8fb5c99c9ab0614bb6dc8f0146c47329e6 100644 (file)
@@ -51,6 +51,9 @@ static int ahci_probe(struct platform_device *pdev)
        if (rc)
                return rc;
 
+       of_property_read_u32(dev->of_node,
+                            "ports-implemented", &hpriv->force_port_map);
+
        if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
                hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
diff --git a/drivers/ata/ahci_seattle.c b/drivers/ata/ahci_seattle.c
new file mode 100644 (file)
index 0000000..6e702ab
--- /dev/null
@@ -0,0 +1,210 @@
+/*
+ * AMD Seattle AHCI SATA driver
+ *
+ * Copyright (c) 2015, Advanced Micro Devices
+ * Author: Brijesh Singh <brijesh.singh@amd.com>
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/libata.h>
+#include <linux/ahci_platform.h>
+#include <linux/acpi.h>
+#include <linux/pci_ids.h>
+#include "ahci.h"
+
+/* SGPIO Control Register definition
+ *
+ * Bit         Type            Description
+ * 31          RW              OD7.2 (activity)
+ * 30          RW              OD7.1 (locate)
+ * 29          RW              OD7.0 (fault)
+ * 28...8      RW              OD6.2...OD0.0 (3bits per port, 1 bit per LED)
+ * 7           RO              SGPIO feature flag
+ * 6:4         RO              Reserved
+ * 3:0         RO              Number of ports (0 means no port supported)
+ */
+#define ACTIVITY_BIT_POS(x)            (8 + (3 * x))
+#define LOCATE_BIT_POS(x)              (ACTIVITY_BIT_POS(x) + 1)
+#define FAULT_BIT_POS(x)               (LOCATE_BIT_POS(x) + 1)
+
+#define ACTIVITY_MASK                  0x00010000
+#define LOCATE_MASK                    0x00080000
+#define FAULT_MASK                     0x00400000
+
+#define DRV_NAME "ahci-seattle"
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+                                           ssize_t size);
+
+struct seattle_plat_data {
+       void __iomem *sgpio_ctrl;
+};
+
+static struct ata_port_operations ahci_port_ops = {
+       .inherits               = &ahci_ops,
+};
+
+static const struct ata_port_info ahci_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_port_ops,
+};
+
+static struct ata_port_operations ahci_seattle_ops = {
+       .inherits               = &ahci_ops,
+       .transmit_led_message   = seattle_transmit_led_message,
+};
+
+static const struct ata_port_info ahci_port_seattle_info = {
+       .flags          = AHCI_FLAG_COMMON | ATA_FLAG_EM | ATA_FLAG_SW_ACTIVITY,
+       .link_flags     = ATA_LFLAG_SW_ACTIVITY,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_seattle_ops,
+};
+
+static struct scsi_host_template ahci_platform_sht = {
+       AHCI_SHT(DRV_NAME),
+};
+
+static ssize_t seattle_transmit_led_message(struct ata_port *ap, u32 state,
+                                           ssize_t size)
+{
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       struct ahci_port_priv *pp = ap->private_data;
+       struct seattle_plat_data *plat_data = hpriv->plat_data;
+       unsigned long flags;
+       int pmp;
+       struct ahci_em_priv *emp;
+       u32 val;
+
+       /* get the slot number from the message */
+       pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
+       if (pmp >= EM_MAX_SLOTS)
+               return -EINVAL;
+       emp = &pp->em_priv[pmp];
+
+       val = ioread32(plat_data->sgpio_ctrl);
+       if (state & ACTIVITY_MASK)
+               val |= 1 << ACTIVITY_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << ACTIVITY_BIT_POS((ap->port_no)));
+
+       if (state & LOCATE_MASK)
+               val |= 1 << LOCATE_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << LOCATE_BIT_POS((ap->port_no)));
+
+       if (state & FAULT_MASK)
+               val |= 1 << FAULT_BIT_POS((ap->port_no));
+       else
+               val &= ~(1 << FAULT_BIT_POS((ap->port_no)));
+
+       iowrite32(val, plat_data->sgpio_ctrl);
+
+       spin_lock_irqsave(ap->lock, flags);
+
+       /* save off new led state for port/slot */
+       emp->led_state = state;
+
+       spin_unlock_irqrestore(ap->lock, flags);
+
+       return size;
+}
+
+static const struct ata_port_info *ahci_seattle_get_port_info(
+               struct platform_device *pdev, struct ahci_host_priv *hpriv)
+{
+       struct device *dev = &pdev->dev;
+       struct seattle_plat_data *plat_data;
+       u32 val;
+
+       plat_data = devm_kzalloc(dev, sizeof(*plat_data), GFP_KERNEL);
+       if (IS_ERR(plat_data))
+               return &ahci_port_info;
+
+       plat_data->sgpio_ctrl = devm_ioremap_resource(dev,
+                             platform_get_resource(pdev, IORESOURCE_MEM, 1));
+       if (IS_ERR(plat_data->sgpio_ctrl))
+               return &ahci_port_info;
+
+       val = ioread32(plat_data->sgpio_ctrl);
+
+       if (!(val & 0xf))
+               return &ahci_port_info;
+
+       hpriv->em_loc = 0;
+       hpriv->em_buf_sz = 4;
+       hpriv->em_msg_type = EM_MSG_TYPE_LED;
+       hpriv->plat_data = plat_data;
+
+       dev_info(dev, "SGPIO LED control is enabled.\n");
+       return &ahci_port_seattle_info;
+}
+
+static int ahci_seattle_probe(struct platform_device *pdev)
+{
+       int rc;
+       struct ahci_host_priv *hpriv;
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       rc = ahci_platform_enable_resources(hpriv);
+       if (rc)
+               return rc;
+
+       rc = ahci_platform_init_host(pdev, hpriv,
+                                    ahci_seattle_get_port_info(pdev, hpriv),
+                                    &ahci_platform_sht);
+       if (rc)
+               goto disable_resources;
+
+       return 0;
+disable_resources:
+       ahci_platform_disable_resources(hpriv);
+       return rc;
+}
+
+static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
+                        ahci_platform_resume);
+
+static const struct acpi_device_id ahci_acpi_match[] = {
+       { "AMDI0600", 0 },
+       {}
+};
+MODULE_DEVICE_TABLE(acpi, ahci_acpi_match);
+
+static struct platform_driver ahci_seattle_driver = {
+       .probe = ahci_seattle_probe,
+       .remove = ata_platform_remove_one,
+       .driver = {
+               .name = DRV_NAME,
+               .acpi_match_table = ahci_acpi_match,
+               .pm = &ahci_pm_ops,
+       },
+};
+module_platform_driver(ahci_seattle_driver);
+
+MODULE_DESCRIPTION("Seattle AHCI SATA platform driver");
+MODULE_AUTHOR("Brijesh Singh <brijesh.singh@amd.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
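
A worked example (illustrative only, not part of the driver) of the SGPIO bit layout documented at the top of ahci_seattle.c, where each port owns three consecutive LED bits starting at bit 8. Kernel context assumed (<linux/io.h>); the helper and its sgpio_ctrl argument are hypothetical:

        static void set_fault_led_port1(void __iomem *sgpio_ctrl)
        {
                u32 val = ioread32(sgpio_ctrl);

                val |= 1 << FAULT_BIT_POS(1);   /* port 1: (8 + 3 * 1) + 2 == bit 13 */
                iowrite32(val, sgpio_ctrl);
        }
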
index 3982054060b81bb0d253b001883b32e346c6dc74..a5d7c1c2a05ee26dc80a41d440357c4a7be96a20 100644 (file)
@@ -507,6 +507,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
                dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
                         port_map, hpriv->force_port_map);
                port_map = hpriv->force_port_map;
+               hpriv->saved_port_map = port_map;
        }
 
        if (hpriv->mask_port_map) {
index 433b60092972d56abba55897158d6c22156cf631..d8f4cc22856c924b1be7bf1aa97f175b6579c554 100644 (file)
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
        reg = opp_table->regulator;
        if (IS_ERR(reg)) {
                /* Regulator may not be required for device */
-               if (reg)
-                       dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
-                               PTR_ERR(reg));
                rcu_read_unlock();
                return 0;
        }
index 9b1a65debd49e645d41343eac7d54a4369ddc2fd..7f692accdc90ec296c55ba738b2cd7f65426ba26 100644 (file)
@@ -21,7 +21,7 @@
 
 static inline bool is_pset_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_PDATA;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_PDATA;
 }
 
 static inline struct property_set *to_pset_node(struct fwnode_handle *fwnode)
index 5c79526245c2e3309a28766b781a9f2bccc850d5..a0380338946a1dbd6cb7e54ab70b001edc77eef7 100644 (file)
@@ -13,6 +13,7 @@
 #ifndef _REGMAP_INTERNAL_H
 #define _REGMAP_INTERNAL_H
 
+#include <linux/device.h>
 #include <linux/regmap.h>
 #include <linux/fs.h>
 #include <linux/list.h>
index 7526906ca080f81dcff1499b7e57c2ff0b79569e..5189fd6182f6c6126b21b1d6b3e25f760d4df290 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/regmap.h>
 #include <linux/slab.h>
 
+#include "internal.h"
+
 struct regmap_mmio_context {
        void __iomem *regs;
        unsigned val_bytes;
@@ -212,6 +214,7 @@ static const struct regmap_bus regmap_mmio = {
        .reg_write = regmap_mmio_write,
        .reg_read = regmap_mmio_read,
        .free_context = regmap_mmio_free_context,
+       .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
 };
 
 static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
@@ -245,7 +248,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
        ctx->val_bytes = config->val_bits / 8;
        ctx->clk = ERR_PTR(-ENODEV);
 
-       switch (config->reg_format_endian) {
+       switch (regmap_get_val_endian(dev, &regmap_mmio, config)) {
        case REGMAP_ENDIAN_DEFAULT:
        case REGMAP_ENDIAN_LITTLE:
 #ifdef __LITTLE_ENDIAN
index 7e58f656039900e633729828f5eae5fb1700b93a..4a36e415e938560ce2e3ba927b81888444767a7d 100644 (file)
@@ -142,7 +142,7 @@ static int regmap_spmi_ext_read(void *context,
        while (val_size) {
                len = min_t(size_t, val_size, 8);
 
-               err = spmi_ext_register_readl(context, addr, val, val_size);
+               err = spmi_ext_register_readl(context, addr, val, len);
                if (err)
                        goto err_out;
 
index 02e18182fcb5506da4c4981a0d5f1926376099e6..2beb396fe6523cb332ec369d6a65a1c7bf9d3e25 100644 (file)
@@ -394,7 +394,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
                clk[IMX6QDL_CLK_LDB_DI1_DIV_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1", 2, 7);
        } else {
                clk[IMX6QDL_CLK_ECSPI_ROOT] = imx_clk_divider("ecspi_root", "pll3_60m", base + 0x38, 19, 6);
-               clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60", base + 0x20, 2, 6);
+               clk[IMX6QDL_CLK_CAN_ROOT] = imx_clk_divider("can_root", "pll3_60m", base + 0x20, 2, 6);
                clk[IMX6QDL_CLK_IPG_PER] = imx_clk_fixup_divider("ipg_per", "ipg", base + 0x1c, 0, 6, imx_cscmr1_fixup);
                clk[IMX6QDL_CLK_UART_SERIAL_PODF] = imx_clk_divider("uart_serial_podf", "pll3_80m",          base + 0x24, 0,  6);
                clk[IMX6QDL_CLK_LDB_DI0_DIV_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
index e93405f0eac46565d18c25873d1113c3a1aa65fd..c4acfc5273b3c1f8c5efc321f5ea8407a4b7f02b 100644 (file)
@@ -1557,21 +1557,25 @@ void cpufreq_suspend(void)
        if (!cpufreq_driver)
                return;
 
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->suspend)
                goto suspend;
 
        pr_debug("%s: Suspending Governors\n", __func__);
 
        for_each_active_policy(policy) {
-               down_write(&policy->rwsem);
-               ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
-               up_write(&policy->rwsem);
+               if (has_target()) {
+                       down_write(&policy->rwsem);
+                       ret = cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+                       up_write(&policy->rwsem);
 
-               if (ret)
-                       pr_err("%s: Failed to stop governor for policy: %p\n",
-                               __func__, policy);
-               else if (cpufreq_driver->suspend
-                   && cpufreq_driver->suspend(policy))
+                       if (ret) {
+                               pr_err("%s: Failed to stop governor for policy: %p\n",
+                                       __func__, policy);
+                               continue;
+                       }
+               }
+
+               if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
                        pr_err("%s: Failed to suspend driver: %p\n", __func__,
                                policy);
        }
@@ -1596,7 +1600,7 @@ void cpufreq_resume(void)
 
        cpufreq_suspended = false;
 
-       if (!has_target())
+       if (!has_target() && !cpufreq_driver->resume)
                return;
 
        pr_debug("%s: Resuming Governors\n", __func__);
@@ -1605,7 +1609,7 @@ void cpufreq_resume(void)
                if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
                        pr_err("%s: Failed to resume driver: %p\n", __func__,
                                policy);
-               } else {
+               } else if (has_target()) {
                        down_write(&policy->rwsem);
                        ret = cpufreq_start_governor(policy);
                        up_write(&policy->rwsem);
index f502d5b90c2537b88a86b5224f21d175556be744..b230ebaae66cb7ee0def1d228ff33c778343f344 100644 (file)
@@ -453,6 +453,14 @@ static void intel_pstate_hwp_set(const struct cpumask *cpumask)
        }
 }
 
+static int intel_pstate_hwp_set_policy(struct cpufreq_policy *policy)
+{
+       if (hwp_active)
+               intel_pstate_hwp_set(policy->cpus);
+
+       return 0;
+}
+
 static void intel_pstate_hwp_set_online_cpus(void)
 {
        get_online_cpus();
@@ -1062,8 +1070,9 @@ static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
 
 static inline int32_t get_avg_frequency(struct cpudata *cpu)
 {
-       return div64_u64(cpu->pstate.max_pstate_physical * cpu->sample.aperf *
-               cpu->pstate.scaling, cpu->sample.mperf);
+       return fp_toint(mul_fp(cpu->sample.core_pct_busy,
+                              int_tofp(cpu->pstate.max_pstate_physical *
+                                               cpu->pstate.scaling / 100)));
 }
 
 static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
@@ -1106,8 +1115,6 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
        int32_t core_busy, max_pstate, current_pstate, sample_ratio;
        u64 duration_ns;
 
-       intel_pstate_calc_busy(cpu);
-
        /*
         * core_busy is the ratio of actual performance to max
         * max_pstate is the max non turbo pstate available
@@ -1191,8 +1198,11 @@ static void intel_pstate_update_util(struct update_util_data *data, u64 time,
        if ((s64)delta_ns >= pid_params.sample_rate_ns) {
                bool sample_taken = intel_pstate_sample(cpu, time);
 
-               if (sample_taken && !hwp_active)
-                       intel_pstate_adjust_busy_pstate(cpu);
+               if (sample_taken) {
+                       intel_pstate_calc_busy(cpu);
+                       if (!hwp_active)
+                               intel_pstate_adjust_busy_pstate(cpu);
+               }
        }
 }
 
@@ -1346,8 +1356,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
  out:
        intel_pstate_set_update_util_hook(policy->cpu);
 
-       if (hwp_active)
-               intel_pstate_hwp_set(policy->cpus);
+       intel_pstate_hwp_set_policy(policy);
 
        return 0;
 }
@@ -1411,6 +1420,7 @@ static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
+       .resume         = intel_pstate_hwp_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
        .stop_cpu       = intel_pstate_stop_cpu,
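
The get_avg_frequency() change above derives the average frequency from the fixed-point core_pct_busy sample rather than the raw aperf/mperf counters. A worked example with assumed values (max_pstate_physical = 24, scaling = 100000 kHz per ratio step, core_pct_busy = 50%):

        avg = core_pct_busy * (max_pstate_physical * scaling / 100)
            = 50 * (24 * 100000 / 100)
            = 1,200,000 kHz   (1.2 GHz)
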
index a9c659f589747a2140f2b91e0c07cd7fe1cbcb69..04042038ec4b75e78631eda625f0f1bb2a4b4d95 100644 (file)
@@ -259,6 +259,10 @@ static int sti_cpufreq_init(void)
 {
        int ret;
 
+       if ((!of_machine_is_compatible("st,stih407")) &&
+               (!of_machine_is_compatible("st,stih410")))
+               return -ENODEV;
+
        ddata.cpu = get_cpu_device(0);
        if (!ddata.cpu) {
                dev_err(ddata.cpu, "Failed to get device for CPU0\n");
index 545069d5fdfba3e7ab07f0ec296404c64ad2a977..e342565e8715e95af6c76a80ca393d8fcfada66a 100644 (file)
@@ -50,7 +50,7 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
                 * call the CPU ops suspend protocol with idle index as a
                 * parameter.
                 */
-               arm_cpuidle_suspend(idx);
+               ret = arm_cpuidle_suspend(idx);
 
                cpu_pm_exit();
        }
index 0e82ce3c383e8c6c1f1f39378fbff03cc17e0336..976b01e58afbfd7f6690b47108232540aac0ad6e 100644 (file)
@@ -236,6 +236,8 @@ void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
                                 uint32_t vf_mask);
 void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
 void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
+int adf_init_pf_wq(void);
+void adf_exit_pf_wq(void);
 #else
 static inline int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
 {
@@ -253,5 +255,14 @@ static inline void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 static inline void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)
 {
 }
+
+static inline int adf_init_pf_wq(void)
+{
+       return 0;
+}
+
+static inline void adf_exit_pf_wq(void)
+{
+}
 #endif
 #endif
index 5c897e6e799408df06e28a0df7363f2fa87cde87..3c3f948290ca057c29edc1297f731abe3b9ebc63 100644 (file)
@@ -462,12 +462,17 @@ static int __init adf_register_ctl_device_driver(void)
        if (adf_init_aer())
                goto err_aer;
 
+       if (adf_init_pf_wq())
+               goto err_pf_wq;
+
        if (qat_crypto_register())
                goto err_crypto_register;
 
        return 0;
 
 err_crypto_register:
+       adf_exit_pf_wq();
+err_pf_wq:
        adf_exit_aer();
 err_aer:
        adf_chr_drv_destroy();
@@ -480,6 +485,7 @@ static void __exit adf_unregister_ctl_device_driver(void)
 {
        adf_chr_drv_destroy();
        adf_exit_aer();
+       adf_exit_pf_wq();
        qat_crypto_unregister();
        adf_clean_vf_map(false);
        mutex_destroy(&adf_ctl_lock);
index 1117a8b58280a084396f696e77f14b9899386981..38a0415e767da3dea08e673eab1b402d0a978a71 100644 (file)
@@ -119,11 +119,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev)
        int i;
        u32 reg;
 
-       /* Workqueue for PF2VF responses */
-       pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
-       if (!pf2vf_resp_wq)
-               return -ENOMEM;
-
        for (i = 0, vf_info = accel_dev->pf.vf_info; i < totalvfs;
             i++, vf_info++) {
                /* This ptr will be populated when VFs will be created */
@@ -216,11 +211,6 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev)
 
        kfree(accel_dev->pf.vf_info);
        accel_dev->pf.vf_info = NULL;
-
-       if (pf2vf_resp_wq) {
-               destroy_workqueue(pf2vf_resp_wq);
-               pf2vf_resp_wq = NULL;
-       }
 }
 EXPORT_SYMBOL_GPL(adf_disable_sriov);
 
@@ -304,3 +294,19 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs)
        return numvfs;
 }
 EXPORT_SYMBOL_GPL(adf_sriov_configure);
+
+int __init adf_init_pf_wq(void)
+{
+       /* Workqueue for PF2VF responses */
+       pf2vf_resp_wq = create_workqueue("qat_pf2vf_resp_wq");
+
+       return !pf2vf_resp_wq ? -ENOMEM : 0;
+}
+
+void adf_exit_pf_wq(void)
+{
+       if (pf2vf_resp_wq) {
+               destroy_workqueue(pf2vf_resp_wq);
+               pf2vf_resp_wq = NULL;
+       }
+}
index e1670d533f9742e94b505ba0684d9373956c8bb0..6394152f648f4f0262a4c5be42a7ae0cb7aaa7a8 100644 (file)
@@ -87,6 +87,31 @@ config EFI_RUNTIME_WRAPPERS
 config EFI_ARMSTUB
        bool
 
+config EFI_BOOTLOADER_CONTROL
+       tristate "EFI Bootloader Control"
+       depends on EFI_VARS
+       default n
+       ---help---
+         This module installs a reboot hook, such that if reboot() is
+         invoked with a string argument NNN, "NNN" is copied to the
+         "LoaderEntryOneShot" EFI variable, to be read by the
+         bootloader. If the string matches one of the boot labels
+         defined in its configuration, the bootloader will boot once
+         to that label. The "LoaderEntryRebootReason" EFI variable is
+         set with the reboot reason: "reboot" or "shutdown". The
+         bootloader reads this reboot reason and takes particular
+         action according to its policy.
+
+config EFI_CAPSULE_LOADER
+       tristate "EFI capsule loader"
+       depends on EFI
+       help
+         This option exposes a loader interface "/dev/efi_capsule_loader" for
+         users to load EFI capsules. This driver requires working runtime
+         capsule support in the firmware, which many OEMs do not provide.
+
+         Most users should say N.
+
 endmenu
 
 config UEFI_CPER
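
The EFI_BOOTLOADER_CONTROL help text above describes the efibc hook: a string argument passed to reboot() is copied into the LoaderEntryOneShot EFI variable for the bootloader to act on. A hedged userspace sketch of passing such an argument; the "recovery" label is hypothetical, and the program needs CAP_SYS_BOOT and really reboots the machine:

        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/reboot.h>

        int main(void)
        {
                sync();
                /* RESTART2 hands the string to the reboot notifier chain,
                 * where efibc writes it to LoaderEntryOneShot. */
                return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1,
                               LINUX_REBOOT_MAGIC2,
                               LINUX_REBOOT_CMD_RESTART2, "recovery");
        }
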
index 62e654f255f4d1e4cb1ee1b955e8fd03ca689221..a219640f881f0306eb1888d924c4980277044e68 100644 (file)
@@ -9,7 +9,8 @@
 #
 KASAN_SANITIZE_runtime-wrappers.o      := n
 
-obj-$(CONFIG_EFI)                      += efi.o vars.o reboot.o
+obj-$(CONFIG_EFI)                      += efi.o vars.o reboot.o memattr.o
+obj-$(CONFIG_EFI)                      += capsule.o
 obj-$(CONFIG_EFI_VARS)                 += efivars.o
 obj-$(CONFIG_EFI_ESRT)                 += esrt.o
 obj-$(CONFIG_EFI_VARS_PSTORE)          += efi-pstore.o
@@ -18,7 +19,9 @@ obj-$(CONFIG_EFI_RUNTIME_MAP)         += runtime-map.o
 obj-$(CONFIG_EFI_RUNTIME_WRAPPERS)     += runtime-wrappers.o
 obj-$(CONFIG_EFI_STUB)                 += libstub/
 obj-$(CONFIG_EFI_FAKE_MEMMAP)          += fake_mem.o
+obj-$(CONFIG_EFI_BOOTLOADER_CONTROL)   += efibc.o
 
 arm-obj-$(CONFIG_EFI)                  := arm-init.o arm-runtime.o
 obj-$(CONFIG_ARM)                      += $(arm-obj-y)
 obj-$(CONFIG_ARM64)                    += $(arm-obj-y)
+obj-$(CONFIG_EFI_CAPSULE_LOADER)       += capsule-loader.o
index 8714f8c271babfff5566765605b5429f33bfa5d8..ef90f0c4b70a735def7eb9c56aec980f7813a8fe 100644 (file)
  *
  */
 
+#define pr_fmt(fmt)    "efi: " fmt
+
 #include <linux/efi.h>
 #include <linux/init.h>
 #include <linux/memblock.h>
 #include <linux/mm_types.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
 
 #include <asm/efi.h>
 
-struct efi_memory_map memmap;
-
 u64 efi_system_table;
 
 static int __init is_normal_ram(efi_memory_desc_t *md)
@@ -40,7 +42,7 @@ static phys_addr_t efi_to_phys(unsigned long addr)
 {
        efi_memory_desc_t *md;
 
-       for_each_efi_memory_desc(&memmap, md) {
+       for_each_efi_memory_desc(md) {
                if (!(md->attribute & EFI_MEMORY_RUNTIME))
                        continue;
                if (md->virt_addr == 0)
@@ -53,6 +55,36 @@ static phys_addr_t efi_to_phys(unsigned long addr)
        return addr;
 }
 
+static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+static __initdata efi_config_table_type_t arch_tables[] = {
+       {LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, NULL, &screen_info_table},
+       {NULL_GUID, NULL, NULL}
+};
+
+static void __init init_screen_info(void)
+{
+       struct screen_info *si;
+
+       if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+               si = early_memremap_ro(screen_info_table, sizeof(*si));
+               if (!si) {
+                       pr_err("Could not map screen_info config table\n");
+                       return;
+               }
+               screen_info = *si;
+               early_memunmap(si, sizeof(*si));
+
+               /* dummycon on ARM needs non-zero values for columns/lines */
+               screen_info.orig_video_cols = 80;
+               screen_info.orig_video_lines = 25;
+       }
+
+       if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+           memblock_is_map_memory(screen_info.lfb_base))
+               memblock_mark_nomap(screen_info.lfb_base, screen_info.lfb_size);
+}
+
 static int __init uefi_init(void)
 {
        efi_char16_t *c16;
@@ -85,6 +117,8 @@ static int __init uefi_init(void)
                        efi.systab->hdr.revision >> 16,
                        efi.systab->hdr.revision & 0xffff);
 
+       efi.runtime_version = efi.systab->hdr.revision;
+
        /* Show what we know for posterity */
        c16 = early_memremap_ro(efi_to_phys(efi.systab->fw_vendor),
                                sizeof(vendor) * sizeof(efi_char16_t));
@@ -108,7 +142,8 @@ static int __init uefi_init(void)
                goto out;
        }
        retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
-                                        sizeof(efi_config_table_t), NULL);
+                                        sizeof(efi_config_table_t),
+                                        arch_tables);
 
        early_memunmap(config_tables, table_size);
 out:
@@ -143,7 +178,7 @@ static __init void reserve_regions(void)
        if (efi_enabled(EFI_DBG))
                pr_info("Processing EFI memory map:\n");
 
-       for_each_efi_memory_desc(&memmap, md) {
+       for_each_efi_memory_desc(md) {
                paddr = md->phys_addr;
                npages = md->num_pages;
 
@@ -184,9 +219,9 @@ void __init efi_init(void)
 
        efi_system_table = params.system_table;
 
-       memmap.phys_map = params.mmap;
-       memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
-       if (memmap.map == NULL) {
+       efi.memmap.phys_map = params.mmap;
+       efi.memmap.map = early_memremap_ro(params.mmap, params.mmap_size);
+       if (efi.memmap.map == NULL) {
                /*
                * If we are booting via UEFI, the UEFI memory map is the only
                * description of memory we have, so there is little point in
@@ -194,28 +229,37 @@ void __init efi_init(void)
                */
                panic("Unable to map EFI memory map.\n");
        }
-       memmap.map_end = memmap.map + params.mmap_size;
-       memmap.desc_size = params.desc_size;
-       memmap.desc_version = params.desc_ver;
+       efi.memmap.map_end = efi.memmap.map + params.mmap_size;
+       efi.memmap.desc_size = params.desc_size;
+       efi.memmap.desc_version = params.desc_ver;
+
+       WARN(efi.memmap.desc_version != 1,
+            "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+             efi.memmap.desc_version);
 
        if (uefi_init() < 0)
                return;
 
        reserve_regions();
-       early_memunmap(memmap.map, params.mmap_size);
+       efi_memattr_init();
+       early_memunmap(efi.memmap.map, params.mmap_size);
 
-       if (IS_ENABLED(CONFIG_ARM)) {
-               /*
-                * ARM currently does not allow ioremap_cache() to be called on
-                * memory regions that are covered by struct page. So remove the
-                * UEFI memory map from the linear mapping.
-                */
-               memblock_mark_nomap(params.mmap & PAGE_MASK,
-                                   PAGE_ALIGN(params.mmap_size +
-                                              (params.mmap & ~PAGE_MASK)));
-       } else {
-               memblock_reserve(params.mmap & PAGE_MASK,
-                                PAGE_ALIGN(params.mmap_size +
-                                           (params.mmap & ~PAGE_MASK)));
-       }
+       memblock_reserve(params.mmap & PAGE_MASK,
+                        PAGE_ALIGN(params.mmap_size +
+                                   (params.mmap & ~PAGE_MASK)));
+
+       init_screen_info();
+}
+
+static int __init register_gop_device(void)
+{
+       void *pd;
+
+       if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+               return 0;
+
+       pd = platform_device_register_data(NULL, "efi-framebuffer", 0,
+                                          &screen_info, sizeof(screen_info));
+       return PTR_ERR_OR_ZERO(pd);
 }
+subsys_initcall(register_gop_device);
index 6ae21e41a429402d132d8d5be215d5adde64d08c..17ccf0a8787a2cf3f7ea1869e2e06801da2c46ef 100644 (file)
@@ -42,11 +42,13 @@ static struct mm_struct efi_mm = {
 static bool __init efi_virtmap_init(void)
 {
        efi_memory_desc_t *md;
+       bool systab_found;
 
        efi_mm.pgd = pgd_alloc(&efi_mm);
        init_new_context(NULL, &efi_mm);
 
-       for_each_efi_memory_desc(&memmap, md) {
+       systab_found = false;
+       for_each_efi_memory_desc(md) {
                phys_addr_t phys = md->phys_addr;
                int ret;
 
@@ -64,7 +66,25 @@ static bool __init efi_virtmap_init(void)
                                &phys, ret);
                        return false;
                }
+               /*
+                * If this entry covers the address of the UEFI system table,
+                * calculate and record its virtual address.
+                */
+               if (efi_system_table >= phys &&
+                   efi_system_table < phys + (md->num_pages * EFI_PAGE_SIZE)) {
+                       efi.systab = (void *)(unsigned long)(efi_system_table -
+                                                            phys + md->virt_addr);
+                       systab_found = true;
+               }
+       }
+       if (!systab_found) {
+               pr_err("No virtual mapping found for the UEFI System Table\n");
+               return false;
        }
+
+       if (efi_memattr_apply_permissions(&efi_mm, efi_set_mapping_permissions))
+               return false;
+
        return true;
 }
 
@@ -89,26 +109,17 @@ static int __init arm_enable_runtime_services(void)
 
        pr_info("Remapping and enabling EFI services.\n");
 
-       mapsize = memmap.map_end - memmap.map;
-       memmap.map = (__force void *)ioremap_cache(memmap.phys_map,
-                                                  mapsize);
-       if (!memmap.map) {
-               pr_err("Failed to remap EFI memory map\n");
-               return -ENOMEM;
-       }
-       memmap.map_end = memmap.map + mapsize;
-       efi.memmap = &memmap;
+       mapsize = efi.memmap.map_end - efi.memmap.map;
 
-       efi.systab = (__force void *)ioremap_cache(efi_system_table,
-                                                  sizeof(efi_system_table_t));
-       if (!efi.systab) {
-               pr_err("Failed to remap EFI System Table\n");
+       efi.memmap.map = memremap(efi.memmap.phys_map, mapsize, MEMREMAP_WB);
+       if (!efi.memmap.map) {
+               pr_err("Failed to remap EFI memory map\n");
                return -ENOMEM;
        }
-       set_bit(EFI_SYSTEM_TABLES, &efi.flags);
+       efi.memmap.map_end = efi.memmap.map + mapsize;
 
        if (!efi_virtmap_init()) {
-               pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
+               pr_err("UEFI virtual mapping missing or invalid -- runtime services will not be available\n");
                return -ENOMEM;
        }
 
@@ -116,8 +127,6 @@ static int __init arm_enable_runtime_services(void)
        efi_native_runtime_setup();
        set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
-       efi.runtime_version = efi.systab->hdr.revision;
-
        return 0;
 }
 early_initcall(arm_enable_runtime_services);
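For illustration, the virtual address of the UEFI System Table computed in efi_virtmap_init() above is a straight offset translation within the covering memory descriptor. A minimal user-space sketch of that arithmetic, using hypothetical addresses rather than values taken from this patch:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SIZE 4096ULL

struct desc {
	uint64_t phys_addr;	/* md->phys_addr */
	uint64_t virt_addr;	/* md->virt_addr set by SetVirtualAddressMap() */
	uint64_t num_pages;	/* md->num_pages */
};

int main(void)
{
	/* hypothetical descriptor covering the system table */
	struct desc md = { 0x9f000000ULL, 0xffff000000200000ULL, 16 };
	uint64_t efi_system_table = 0x9f001078ULL;	/* hypothetical physical address */

	if (efi_system_table >= md.phys_addr &&
	    efi_system_table < md.phys_addr + md.num_pages * EFI_PAGE_SIZE) {
		uint64_t virt = efi_system_table - md.phys_addr + md.virt_addr;

		printf("systab: phys 0x%llx -> virt 0x%llx\n",
		       (unsigned long long)efi_system_table,
		       (unsigned long long)virt);
	}
	return 0;
}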
diff --git a/drivers/firmware/efi/capsule-loader.c b/drivers/firmware/efi/capsule-loader.c
new file mode 100644 (file)
index 0000000..c99c24b
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+ * EFI capsule loader driver.
+ *
+ * Copyright 2015 Intel Corporation
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/efi.h>
+
+#define NO_FURTHER_WRITE_ACTION -1
+
+struct capsule_info {
+       bool            header_obtained;
+       int             reset_type;
+       long            index;
+       size_t          count;
+       size_t          total_size;
+       struct page     **pages;
+       size_t          page_bytes_remain;
+};
+
+/**
+ * efi_free_all_buff_pages - free all previously allocated buffer pages
+ * @cap_info: pointer to current instance of capsule_info structure
+ *
+ *     Besides freeing the buffer pages, this sets the page index to
+ *     NO_FURTHER_WRITE_ACTION so that data in subsequent write(2) calls is
+ *     ignored until close(2) is called.
+ **/
+static void efi_free_all_buff_pages(struct capsule_info *cap_info)
+{
+       while (cap_info->index > 0)
+               __free_page(cap_info->pages[--cap_info->index]);
+
+       cap_info->index = NO_FURTHER_WRITE_ACTION;
+}
+
+/**
+ * efi_capsule_setup_info - obtain the EFI capsule header from the binary and
+ *                          set up the capsule_info structure
+ * @cap_info: pointer to current instance of capsule_info structure
+ * @kbuff: mapped pointer into the first page buffer
+ * @hdr_bytes: total number of bytes received so far for the EFI header
+ **/
+static ssize_t efi_capsule_setup_info(struct capsule_info *cap_info,
+                                     void *kbuff, size_t hdr_bytes)
+{
+       efi_capsule_header_t *cap_hdr;
+       size_t pages_needed;
+       int ret;
+       void *temp_page;
+
+       /* Only start processing once at least a full EFI header has arrived */
+       if (hdr_bytes < sizeof(efi_capsule_header_t))
+               return 0;
+
+       /* Rewind to the start of the capsule header */
+       cap_hdr = kbuff - cap_info->count;
+       pages_needed = ALIGN(cap_hdr->imagesize, PAGE_SIZE) >> PAGE_SHIFT;
+
+       if (pages_needed == 0) {
+               pr_err("%s: pages count invalid\n", __func__);
+               return -EINVAL;
+       }
+
+       /* Check whether the capsule binary is supported */
+       ret = efi_capsule_supported(cap_hdr->guid, cap_hdr->flags,
+                                   cap_hdr->imagesize,
+                                   &cap_info->reset_type);
+       if (ret) {
+               pr_err("%s: efi_capsule_supported() failed\n",
+                      __func__);
+               return ret;
+       }
+
+       cap_info->total_size = cap_hdr->imagesize;
+       temp_page = krealloc(cap_info->pages,
+                            pages_needed * sizeof(void *),
+                            GFP_KERNEL | __GFP_ZERO);
+       if (!temp_page) {
+               pr_debug("%s: krealloc() failed\n", __func__);
+               return -ENOMEM;
+       }
+
+       cap_info->pages = temp_page;
+       cap_info->header_obtained = true;
+
+       return 0;
+}
+
+/**
+ * efi_capsule_submit_update - invoke the efi_capsule_update() API once the
+ *                             binary upload is done
+ * @cap_info: pointer to current instance of capsule_info structure
+ **/
+static ssize_t efi_capsule_submit_update(struct capsule_info *cap_info)
+{
+       int ret;
+       void *cap_hdr_temp;
+
+       cap_hdr_temp = kmap(cap_info->pages[0]);
+       if (!cap_hdr_temp) {
+               pr_debug("%s: kmap() failed\n", __func__);
+               return -EFAULT;
+       }
+
+       ret = efi_capsule_update(cap_hdr_temp, cap_info->pages);
+       kunmap(cap_info->pages[0]);
+       if (ret) {
+               pr_err("%s: efi_capsule_update() failed\n", __func__);
+               return ret;
+       }
+
+       /* Indicate that the capsule binary upload is done */
+       cap_info->index = NO_FURTHER_WRITE_ACTION;
+       pr_info("%s: Successfully uploaded capsule file with reboot type '%s'\n",
+               __func__, !cap_info->reset_type ? "RESET_COLD" :
+               cap_info->reset_type == 1 ? "RESET_WARM" :
+               "RESET_SHUTDOWN");
+       return 0;
+}
+
+/**
+ * efi_capsule_write - store the capsule binary and pass it to
+ *                    efi_capsule_update() API
+ * @file: file pointer
+ * @buff: buffer pointer
+ * @count: number of bytes in @buff
+ * @offp: not used
+ *
+ *     Expectations:
+ *     - A user space tool should start at the beginning of the capsule binary
+ *       and pass the data in sequentially.
+ *     - Users should close and re-open this device node in order to upload
+ *       more capsules.
+ *     - After an error is returned, the user should close the file and restart
+ *       the operation for the next try; otherwise -EIO will be returned until
+ *       the file is closed.
+ *     - An EFI capsule header must be located at the beginning of the capsule
+ *       binary file and passed in as the first block of data of the write
+ *       operation.
+ **/
+static ssize_t efi_capsule_write(struct file *file, const char __user *buff,
+                                size_t count, loff_t *offp)
+{
+       int ret = 0;
+       struct capsule_info *cap_info = file->private_data;
+       struct page *page;
+       void *kbuff = NULL;
+       size_t write_byte;
+
+       if (count == 0)
+               return 0;
+
+       /* Return error while NO_FURTHER_WRITE_ACTION is flagged */
+       if (cap_info->index < 0)
+               return -EIO;
+
+       /* Only alloc a new page when previous page is full */
+       if (!cap_info->page_bytes_remain) {
+               page = alloc_page(GFP_KERNEL);
+               if (!page) {
+                       pr_debug("%s: alloc_page() failed\n", __func__);
+                       ret = -ENOMEM;
+                       goto failed;
+               }
+
+               cap_info->pages[cap_info->index++] = page;
+               cap_info->page_bytes_remain = PAGE_SIZE;
+       }
+
+       page = cap_info->pages[cap_info->index - 1];
+
+       kbuff = kmap(page);
+       if (!kbuff) {
+               pr_debug("%s: kmap() failed\n", __func__);
+               ret = -EFAULT;
+               goto failed;
+       }
+       kbuff += PAGE_SIZE - cap_info->page_bytes_remain;
+
+       /* Copy capsule binary data from user space to kernel space buffer */
+       write_byte = min_t(size_t, count, cap_info->page_bytes_remain);
+       if (copy_from_user(kbuff, buff, write_byte)) {
+               pr_debug("%s: copy_from_user() failed\n", __func__);
+               ret = -EFAULT;
+               goto fail_unmap;
+       }
+       cap_info->page_bytes_remain -= write_byte;
+
+       /* Setup capsule binary info structure */
+       if (!cap_info->header_obtained) {
+               ret = efi_capsule_setup_info(cap_info, kbuff,
+                                            cap_info->count + write_byte);
+               if (ret)
+                       goto fail_unmap;
+       }
+
+       cap_info->count += write_byte;
+       kunmap(page);
+
+       /* Submit the full binary to efi_capsule_update() API */
+       if (cap_info->header_obtained &&
+           cap_info->count >= cap_info->total_size) {
+               if (cap_info->count > cap_info->total_size) {
+                       pr_err("%s: upload size exceeded header defined size\n",
+                              __func__);
+                       ret = -EINVAL;
+                       goto failed;
+               }
+
+               ret = efi_capsule_submit_update(cap_info);
+               if (ret)
+                       goto failed;
+       }
+
+       return write_byte;
+
+fail_unmap:
+       kunmap(page);
+failed:
+       efi_free_all_buff_pages(cap_info);
+       return ret;
+}
+
+/**
+ * efi_capsule_flush - called by file close or file flush
+ * @file: file pointer
+ * @id: not used
+ *
+ *     If a capsule upload is only partially complete, calling this function
+ *     is treated as upload termination: the buffer pages allocated so far are
+ *     freed and -ECANCELED is returned.
+ **/
+static int efi_capsule_flush(struct file *file, fl_owner_t id)
+{
+       int ret = 0;
+       struct capsule_info *cap_info = file->private_data;
+
+       if (cap_info->index > 0) {
+               pr_err("%s: capsule upload not complete\n", __func__);
+               efi_free_all_buff_pages(cap_info);
+               ret = -ECANCELED;
+       }
+
+       return ret;
+}
+
+/**
+ * efi_capsule_release - called by file close
+ * @inode: not used
+ * @file: file pointer
+ *
+ *     We will not free successfully submitted pages since the EFI update
+ *     requires the data to be maintained across system reboot.
+ **/
+static int efi_capsule_release(struct inode *inode, struct file *file)
+{
+       struct capsule_info *cap_info = file->private_data;
+
+       kfree(cap_info->pages);
+       kfree(file->private_data);
+       file->private_data = NULL;
+       return 0;
+}
+
+/**
+ * efi_capsule_open - called by file open
+ * @inode: not used
+ * @file: file pointer
+ *
+ *     A capsule_info structure is allocated for every open() call, so
+ *     multiple users can upload capsule binaries concurrently without
+ *     having to wait for each other to finish.
+ **/
+static int efi_capsule_open(struct inode *inode, struct file *file)
+{
+       struct capsule_info *cap_info;
+
+       cap_info = kzalloc(sizeof(*cap_info), GFP_KERNEL);
+       if (!cap_info)
+               return -ENOMEM;
+
+       cap_info->pages = kzalloc(sizeof(void *), GFP_KERNEL);
+       if (!cap_info->pages) {
+               kfree(cap_info);
+               return -ENOMEM;
+       }
+
+       file->private_data = cap_info;
+
+       return 0;
+}
+
+static const struct file_operations efi_capsule_fops = {
+       .owner = THIS_MODULE,
+       .open = efi_capsule_open,
+       .write = efi_capsule_write,
+       .flush = efi_capsule_flush,
+       .release = efi_capsule_release,
+       .llseek = no_llseek,
+};
+
+static struct miscdevice efi_capsule_misc = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "efi_capsule_loader",
+       .fops = &efi_capsule_fops,
+};
+
+static int __init efi_capsule_loader_init(void)
+{
+       int ret;
+
+       if (!efi_enabled(EFI_RUNTIME_SERVICES))
+               return -ENODEV;
+
+       ret = misc_register(&efi_capsule_misc);
+       if (ret)
+               pr_err("%s: Failed to register misc char file note\n",
+                      __func__);
+
+       return ret;
+}
+module_init(efi_capsule_loader_init);
+
+static void __exit efi_capsule_loader_exit(void)
+{
+       misc_deregister(&efi_capsule_misc);
+}
+module_exit(efi_capsule_loader_exit);
+
+MODULE_DESCRIPTION("EFI capsule firmware binary loader");
+MODULE_LICENSE("GPL v2");
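For context, the write-side expectations documented above for efi_capsule_write() amount to a very small user-space upload tool: open the device node, stream the capsule file in order starting with its EFI header, then close. A hedged sketch follows; the /dev/efi_capsule_loader path is inferred from the misc device name, and the chunk size is arbitrary.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, out;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <capsule.bin>\n", argv[0]);
		return 1;
	}

	in = open(argv[1], O_RDONLY);
	out = open("/dev/efi_capsule_loader", O_WRONLY);	/* assumed misc node */
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}

	/* Pass the capsule in sequentially, starting with the EFI header. */
	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, n) != n) {
			perror("write");	/* close and retry from the start */
			return 1;
		}
	}

	/* close() triggers flush; an incomplete upload is reported as -ECANCELED. */
	if (close(out) < 0)
		perror("close");
	close(in);
	return 0;
}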
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
new file mode 100644 (file)
index 0000000..53b9fd2
--- /dev/null
@@ -0,0 +1,308 @@
+/*
+ * EFI capsule support.
+ *
+ * Copyright 2013 Intel Corporation; author Matt Fleming
+ *
+ * This file is part of the Linux kernel, and is made available under
+ * the terms of the GNU General Public License version 2.
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/highmem.h>
+#include <linux/efi.h>
+#include <linux/vmalloc.h>
+#include <asm/io.h>
+
+typedef struct {
+       u64 length;
+       u64 data;
+} efi_capsule_block_desc_t;
+
+static bool capsule_pending;
+static bool stop_capsules;
+static int efi_reset_type = -1;
+
+/*
+ * capsule_mutex serialises access to capsule_pending, efi_reset_type
+ * and stop_capsules.
+ */
+static DEFINE_MUTEX(capsule_mutex);
+
+/**
+ * efi_capsule_pending - has a capsule been passed to the firmware?
+ * @reset_type: store the type of EFI reset if capsule is pending
+ *
+ * To ensure that the registered capsule is processed correctly by the
+ * firmware we need to perform a specific type of reset. If a capsule is
+ * pending return the reset type in @reset_type.
+ *
+ * This function will race with callers of efi_capsule_update(): for
+ * example, calling this function while somebody else is in
+ * efi_capsule_update() but hasn't yet reached efi_capsule_update_locked()
+ * will miss the updates to capsule_pending and efi_reset_type made when
+ * efi_capsule_update_locked() completes.
+ *
+ * A non-racy use is from platform reboot code because we use
+ * system_state to ensure no capsules can be sent to the firmware once
+ * we're at SYSTEM_RESTART. See efi_capsule_update_locked().
+ */
+bool efi_capsule_pending(int *reset_type)
+{
+       if (!capsule_pending)
+               return false;
+
+       if (reset_type)
+               *reset_type = efi_reset_type;
+
+       return true;
+}
+
+/*
+ * Whitelist of EFI capsule flags that we support.
+ *
+ * We do not handle EFI_CAPSULE_INITIATE_RESET because that would
+ * require us to prepare the kernel for reboot. Refuse to load any
+ * capsules with that flag and any other flags that we do not know how
+ * to handle.
+ */
+#define EFI_CAPSULE_SUPPORTED_FLAG_MASK                        \
+       (EFI_CAPSULE_PERSIST_ACROSS_RESET | EFI_CAPSULE_POPULATE_SYSTEM_TABLE)
+
+/**
+ * efi_capsule_supported - does the firmware support the capsule?
+ * @guid: vendor guid of capsule
+ * @flags: capsule flags
+ * @size: size of capsule data
+ * @reset: the reset type required for this capsule
+ *
+ * Check whether a capsule with @flags is supported by the firmware
+ * and that @size doesn't exceed the maximum size for a capsule.
+ *
+ * No attempt is made to check @reset against the reset type required
+ * by any pending capsules because of the races involved.
+ */
+int efi_capsule_supported(efi_guid_t guid, u32 flags, size_t size, int *reset)
+{
+       efi_capsule_header_t capsule;
+       efi_capsule_header_t *cap_list[] = { &capsule };
+       efi_status_t status;
+       u64 max_size;
+
+       if (flags & ~EFI_CAPSULE_SUPPORTED_FLAG_MASK)
+               return -EINVAL;
+
+       capsule.headersize = capsule.imagesize = sizeof(capsule);
+       memcpy(&capsule.guid, &guid, sizeof(efi_guid_t));
+       capsule.flags = flags;
+
+       status = efi.query_capsule_caps(cap_list, 1, &max_size, reset);
+       if (status != EFI_SUCCESS)
+               return efi_status_to_err(status);
+
+       if (size > max_size)
+               return -ENOSPC;
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_supported);
+
+/*
+ * Every scatter gather list (block descriptor) page must end with a
+ * continuation pointer. The last continuation pointer of the last
+ * page must be zero to mark the end of the chain.
+ */
+#define SGLIST_PER_PAGE        ((PAGE_SIZE / sizeof(efi_capsule_block_desc_t)) - 1)
+
+/*
+ * How many scatter gather list (block descriptor) pages do we need
+ * to map @count pages?
+ */
+static inline unsigned int sg_pages_num(unsigned int count)
+{
+       return DIV_ROUND_UP(count, SGLIST_PER_PAGE);
+}
+
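A quick sanity check of the descriptor arithmetic above, as a stand-alone sketch (the capsule size is a made-up example): with 4 KiB pages and the 16-byte block descriptor defined earlier, each scatter gather page carries 255 data descriptors plus one continuation pointer.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define DESC_SIZE	16UL	/* sizeof(efi_capsule_block_desc_t): u64 length + u64 data */
#define SGLIST_PER_PAGE	((PAGE_SIZE / DESC_SIZE) - 1)	/* 255 data descriptors per page */

int main(void)
{
	unsigned long capsule_pages = 1000;	/* hypothetical capsule spanning 1000 pages */
	unsigned long sg_pages =
		(capsule_pages + SGLIST_PER_PAGE - 1) / SGLIST_PER_PAGE;

	/* prints "1000 capsule pages -> 4 scatter gather pages" */
	printf("%lu capsule pages -> %lu scatter gather pages\n",
	       capsule_pages, sg_pages);
	return 0;
}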
+/**
+ * efi_capsule_update_locked - pass a single capsule to the firmware
+ * @capsule: capsule to send to the firmware
+ * @sg_pages: array of scatter gather (block descriptor) pages
+ * @reset: the reset type required for @capsule
+ *
+ * This function must be called under capsule_mutex. It checks whether
+ * efi_reset_type conflicts with @reset, and atomically sets both it and
+ * capsule_pending if a capsule was successfully sent to the firmware.
+ *
+ * We also check to see if the system is about to restart, and if so,
+ * abort. This avoids races between efi_capsule_update() and
+ * efi_capsule_pending().
+ */
+static int
+efi_capsule_update_locked(efi_capsule_header_t *capsule,
+                         struct page **sg_pages, int reset)
+{
+       efi_physical_addr_t sglist_phys;
+       efi_status_t status;
+
+       lockdep_assert_held(&capsule_mutex);
+
+       /*
+        * If someone has already registered a capsule that requires a
+        * different reset type, we're out of luck and must abort.
+        */
+       if (efi_reset_type >= 0 && efi_reset_type != reset) {
+               pr_err("Conflicting capsule reset type %d (%d).\n",
+                      reset, efi_reset_type);
+               return -EINVAL;
+       }
+
+       /*
+        * If the system is getting ready to restart it may have
+        * called efi_capsule_pending() to make decisions (such as
+        * whether to force an EFI reboot), and we're racing against
+        * that call. Abort in that case.
+        */
+       if (unlikely(stop_capsules)) {
+               pr_warn("Capsule update raced with reboot, aborting.\n");
+               return -EINVAL;
+       }
+
+       sglist_phys = page_to_phys(sg_pages[0]);
+
+       status = efi.update_capsule(&capsule, 1, sglist_phys);
+       if (status == EFI_SUCCESS) {
+               capsule_pending = true;
+               efi_reset_type = reset;
+       }
+
+       return efi_status_to_err(status);
+}
+
+/**
+ * efi_capsule_update - send a capsule to the firmware
+ * @capsule: capsule to send to firmware
+ * @pages: an array of capsule data pages
+ *
+ * Build a scatter gather list with EFI capsule block descriptors to
+ * map the capsule described by @capsule with its data in @pages and
+ * send it to the firmware via the UpdateCapsule() runtime service.
+ *
+ * @capsule must be a virtual mapping of the first page in @pages
+ * (@pages[0]) in the kernel address space. That is, a
+ * efi_capsule_header_t that describes the entire contents of the capsule
+ * must be at the start of the first data page.
+ *
+ * Even though this function will validate that the firmware supports
+ * the capsule guid, users will likely want to check that
+ * efi_capsule_supported() returns true before calling this function
+ * because it makes it easier to print helpful error messages.
+ *
+ * If the capsule is successfully submitted to the firmware, any
+ * subsequent calls to efi_capsule_pending() will return true. @pages
+ * must not be released or modified if this function returns
+ * successfully.
+ *
+ * Callers must be prepared for this function to fail, which can
+ * happen if we raced with system reboot or if there is already a
+ * pending capsule that has a reset type that conflicts with the one
+ * required by @capsule. Do NOT use efi_capsule_pending() to detect
+ * this conflict since that would be racy. Instead, submit the capsule
+ * to efi_capsule_update() and check the return value.
+ *
+ * Return 0 on success, a converted EFI status code on failure.
+ */
+int efi_capsule_update(efi_capsule_header_t *capsule, struct page **pages)
+{
+       u32 imagesize = capsule->imagesize;
+       efi_guid_t guid = capsule->guid;
+       unsigned int count, sg_count;
+       u32 flags = capsule->flags;
+       struct page **sg_pages;
+       int rv, reset_type;
+       int i, j;
+
+       rv = efi_capsule_supported(guid, flags, imagesize, &reset_type);
+       if (rv)
+               return rv;
+
+       count = DIV_ROUND_UP(imagesize, PAGE_SIZE);
+       sg_count = sg_pages_num(count);
+
+       sg_pages = kzalloc(sg_count * sizeof(*sg_pages), GFP_KERNEL);
+       if (!sg_pages)
+               return -ENOMEM;
+
+       for (i = 0; i < sg_count; i++) {
+               sg_pages[i] = alloc_page(GFP_KERNEL);
+               if (!sg_pages[i]) {
+                       rv = -ENOMEM;
+                       goto out;
+               }
+       }
+
+       for (i = 0; i < sg_count; i++) {
+               efi_capsule_block_desc_t *sglist;
+
+               sglist = kmap(sg_pages[i]);
+               if (!sglist) {
+                       rv = -ENOMEM;
+                       goto out;
+               }
+
+               for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
+                       u64 sz = min_t(u64, imagesize, PAGE_SIZE);
+
+                       sglist[j].length = sz;
+                       sglist[j].data = page_to_phys(*pages++);
+
+                       imagesize -= sz;
+                       count--;
+               }
+
+               /* Continuation pointer */
+               sglist[j].length = 0;
+
+               if (i + 1 == sg_count)
+                       sglist[j].data = 0;
+               else
+                       sglist[j].data = page_to_phys(sg_pages[i + 1]);
+
+               kunmap(sg_pages[i]);
+       }
+
+       mutex_lock(&capsule_mutex);
+       rv = efi_capsule_update_locked(capsule, sg_pages, reset_type);
+       mutex_unlock(&capsule_mutex);
+
+out:
+       for (i = 0; rv && i < sg_count; i++) {
+               if (sg_pages[i])
+                       __free_page(sg_pages[i]);
+       }
+
+       kfree(sg_pages);
+       return rv;
+}
+EXPORT_SYMBOL_GPL(efi_capsule_update);
+
+static int capsule_reboot_notify(struct notifier_block *nb, unsigned long event, void *cmd)
+{
+       mutex_lock(&capsule_mutex);
+       stop_capsules = true;
+       mutex_unlock(&capsule_mutex);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block capsule_reboot_nb = {
+       .notifier_call = capsule_reboot_notify,
+};
+
+static int __init capsule_reboot_register(void)
+{
+       return register_reboot_notifier(&capsule_reboot_nb);
+}
+core_initcall(capsule_reboot_register);
index 3a69ed5ecfcb5f1254eaf1b0c3fb53cbdbcc61fb..05509f3aaee8feab84d8cdfdf24d1bd4af4f3b77 100644 (file)
@@ -43,6 +43,7 @@ struct efi __read_mostly efi = {
        .config_table           = EFI_INVALID_TABLE_ADDR,
        .esrt                   = EFI_INVALID_TABLE_ADDR,
        .properties_table       = EFI_INVALID_TABLE_ADDR,
+       .mem_attr_table         = EFI_INVALID_TABLE_ADDR,
 };
 EXPORT_SYMBOL(efi);
 
@@ -256,7 +257,7 @@ subsys_initcall(efisubsys_init);
  */
 int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
 {
-       struct efi_memory_map *map = efi.memmap;
+       struct efi_memory_map *map = &efi.memmap;
        phys_addr_t p, e;
 
        if (!efi_enabled(EFI_MEMMAP)) {
@@ -338,6 +339,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
        {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
        {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
        {EFI_PROPERTIES_TABLE_GUID, "PROP", &efi.properties_table},
+       {EFI_MEMORY_ATTRIBUTES_TABLE_GUID, "MEMATTR", &efi.mem_attr_table},
        {NULL_GUID, NULL, NULL},
 };
 
@@ -351,8 +353,9 @@ static __init int match_config_table(efi_guid_t *guid,
                for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
                        if (!efi_guidcmp(*guid, table_types[i].guid)) {
                                *(table_types[i].ptr) = table;
-                               pr_cont(" %s=0x%lx ",
-                                       table_types[i].name, table);
+                               if (table_types[i].name)
+                                       pr_cont(" %s=0x%lx ",
+                                               table_types[i].name, table);
                                return 1;
                        }
                }
@@ -620,16 +623,12 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
  */
 u64 __weak efi_mem_attributes(unsigned long phys_addr)
 {
-       struct efi_memory_map *map;
        efi_memory_desc_t *md;
-       void *p;
 
        if (!efi_enabled(EFI_MEMMAP))
                return 0;
 
-       map = efi.memmap;
-       for (p = map->map; p < map->map_end; p += map->desc_size) {
-               md = p;
+       for_each_efi_memory_desc(md) {
                if ((md->phys_addr <= phys_addr) &&
                    (phys_addr < (md->phys_addr +
                    (md->num_pages << EFI_PAGE_SHIFT))))
@@ -637,3 +636,36 @@ u64 __weak efi_mem_attributes(unsigned long phys_addr)
        }
        return 0;
 }
+
+int efi_status_to_err(efi_status_t status)
+{
+       int err;
+
+       switch (status) {
+       case EFI_SUCCESS:
+               err = 0;
+               break;
+       case EFI_INVALID_PARAMETER:
+               err = -EINVAL;
+               break;
+       case EFI_OUT_OF_RESOURCES:
+               err = -ENOSPC;
+               break;
+       case EFI_DEVICE_ERROR:
+               err = -EIO;
+               break;
+       case EFI_WRITE_PROTECTED:
+               err = -EROFS;
+               break;
+       case EFI_SECURITY_VIOLATION:
+               err = -EACCES;
+               break;
+       case EFI_NOT_FOUND:
+               err = -ENOENT;
+               break;
+       default:
+               err = -EINVAL;
+       }
+
+       return err;
+}
diff --git a/drivers/firmware/efi/efibc.c b/drivers/firmware/efi/efibc.c
new file mode 100644 (file)
index 0000000..8dd0c70
--- /dev/null
@@ -0,0 +1,113 @@
+/*
+ * efibc: control EFI bootloaders which obey LoaderEntryOneShot var
+ * Copyright (c) 2013-2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+
+#define pr_fmt(fmt) "efibc: " fmt
+
+#include <linux/efi.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+
+static void efibc_str_to_str16(const char *str, efi_char16_t *str16)
+{
+       size_t i;
+
+       for (i = 0; i < strlen(str); i++)
+               str16[i] = str[i];
+
+       str16[i] = '\0';
+}
+
+static int efibc_set_variable(const char *name, const char *value)
+{
+       int ret;
+       efi_guid_t guid = LINUX_EFI_LOADER_ENTRY_GUID;
+       struct efivar_entry *entry;
+       size_t size = (strlen(value) + 1) * sizeof(efi_char16_t);
+
+       if (size > sizeof(entry->var.Data)) {
+               pr_err("value is too large");
+               return -EINVAL;
+       }
+
+       entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry) {
+               pr_err("failed to allocate efivar entry");
+               return -ENOMEM;
+       }
+
+       efibc_str_to_str16(name, entry->var.VariableName);
+       efibc_str_to_str16(value, (efi_char16_t *)entry->var.Data);
+       memcpy(&entry->var.VendorGuid, &guid, sizeof(guid));
+
+       ret = efivar_entry_set(entry,
+                              EFI_VARIABLE_NON_VOLATILE
+                              | EFI_VARIABLE_BOOTSERVICE_ACCESS
+                              | EFI_VARIABLE_RUNTIME_ACCESS,
+                              size, entry->var.Data, NULL);
+       if (ret)
+               pr_err("failed to set %s EFI variable: 0x%x\n",
+                      name, ret);
+
+       kfree(entry);
+       return ret;
+}
+
+static int efibc_reboot_notifier_call(struct notifier_block *notifier,
+                                     unsigned long event, void *data)
+{
+       const char *reason = "shutdown";
+       int ret;
+
+       if (event == SYS_RESTART)
+               reason = "reboot";
+
+       ret = efibc_set_variable("LoaderEntryRebootReason", reason);
+       if (ret || !data)
+               return NOTIFY_DONE;
+
+       efibc_set_variable("LoaderEntryOneShot", (char *)data);
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block efibc_reboot_notifier = {
+       .notifier_call = efibc_reboot_notifier_call,
+};
+
+static int __init efibc_init(void)
+{
+       int ret;
+
+       if (!efi_enabled(EFI_RUNTIME_SERVICES))
+               return -ENODEV;
+
+       ret = register_reboot_notifier(&efibc_reboot_notifier);
+       if (ret)
+               pr_err("unable to register reboot notifier\n");
+
+       return ret;
+}
+module_init(efibc_init);
+
+static void __exit efibc_exit(void)
+{
+       unregister_reboot_notifier(&efibc_reboot_notifier);
+}
+module_exit(efibc_exit);
+
+MODULE_AUTHOR("Jeremy Compostella <jeremy.compostella@intel.com>");
+MODULE_AUTHOR("Matt Gumbel <matthew.k.gumbel@intel.com");
+MODULE_DESCRIPTION("EFI Bootloader Control");
+MODULE_LICENSE("GPL v2");
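As an illustration of how the notifier above receives a non-NULL data pointer: the string comes from the reboot(2) RESTART2 command, so a user-space tool could request a one-shot boot entry roughly as follows (the "bootloader" entry name is a hypothetical example, and the call requires CAP_SYS_BOOT):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	/* efibc would record "bootloader" in LoaderEntryOneShot before the reset */
	if (syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		    LINUX_REBOOT_CMD_RESTART2, "bootloader") < 0) {
		perror("reboot");
		return 1;
	}
	return 0;
}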
index 096adcbcb5a99720c77bc69af458bf5f9628dbd4..116b244dee68283016f7696ac6358eb800ea70a0 100644 (file)
@@ -661,7 +661,7 @@ static void efivar_update_sysfs_entries(struct work_struct *work)
                        return;
 
                err = efivar_init(efivar_update_sysfs_entry, entry,
-                                 true, false, &efivar_sysfs_list);
+                                 false, &efivar_sysfs_list);
                if (!err)
                        break;
 
@@ -730,8 +730,7 @@ int efivars_sysfs_init(void)
                return -ENOMEM;
        }
 
-       efivar_init(efivars_sysfs_callback, NULL, false,
-                   true, &efivar_sysfs_list);
+       efivar_init(efivars_sysfs_callback, NULL, true, &efivar_sysfs_list);
 
        error = create_efivars_bin_attributes();
        if (error) {
index ed3a854950cca80b0a29bb8a38362d61b083b701..48430aba13c189d3cad7f04c6e5ee036701f113c 100644 (file)
@@ -57,7 +57,7 @@ static int __init cmp_fake_mem(const void *x1, const void *x2)
 void __init efi_fake_memmap(void)
 {
        u64 start, end, m_start, m_end, m_attr;
-       int new_nr_map = memmap.nr_map;
+       int new_nr_map = efi.memmap.nr_map;
        efi_memory_desc_t *md;
        phys_addr_t new_memmap_phy;
        void *new_memmap;
@@ -68,8 +68,7 @@ void __init efi_fake_memmap(void)
                return;
 
+       /* count up the number of EFI memory descriptors */
-       for (old = memmap.map; old < memmap.map_end; old += memmap.desc_size) {
-               md = old;
+       for_each_efi_memory_desc(md) {
                start = md->phys_addr;
                end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;
 
@@ -95,25 +94,25 @@ void __init efi_fake_memmap(void)
        }
 
        /* allocate memory for new EFI memmap */
-       new_memmap_phy = memblock_alloc(memmap.desc_size * new_nr_map,
+       new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
                                        PAGE_SIZE);
        if (!new_memmap_phy)
                return;
 
        /* create new EFI memmap */
        new_memmap = early_memremap(new_memmap_phy,
-                                   memmap.desc_size * new_nr_map);
+                                   efi.memmap.desc_size * new_nr_map);
        if (!new_memmap) {
-               memblock_free(new_memmap_phy, memmap.desc_size * new_nr_map);
+               memblock_free(new_memmap_phy, efi.memmap.desc_size * new_nr_map);
                return;
        }
 
-       for (old = memmap.map, new = new_memmap;
-            old < memmap.map_end;
-            old += memmap.desc_size, new += memmap.desc_size) {
+       for (old = efi.memmap.map, new = new_memmap;
+            old < efi.memmap.map_end;
+            old += efi.memmap.desc_size, new += efi.memmap.desc_size) {
 
                /* copy original EFI memory descriptor */
-               memcpy(new, old, memmap.desc_size);
+               memcpy(new, old, efi.memmap.desc_size);
                md = new;
                start = md->phys_addr;
                end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
@@ -134,8 +133,8 @@ void __init efi_fake_memmap(void)
                                md->num_pages = (m_end - md->phys_addr + 1) >>
                                        EFI_PAGE_SHIFT;
                                /* latter part */
-                               new += memmap.desc_size;
-                               memcpy(new, old, memmap.desc_size);
+                               new += efi.memmap.desc_size;
+                               memcpy(new, old, efi.memmap.desc_size);
                                md = new;
                                md->phys_addr = m_end + 1;
                                md->num_pages = (end - md->phys_addr + 1) >>
@@ -147,16 +146,16 @@ void __init efi_fake_memmap(void)
                                md->num_pages = (m_start - md->phys_addr) >>
                                        EFI_PAGE_SHIFT;
                                /* middle part */
-                               new += memmap.desc_size;
-                               memcpy(new, old, memmap.desc_size);
+                               new += efi.memmap.desc_size;
+                               memcpy(new, old, efi.memmap.desc_size);
                                md = new;
                                md->attribute |= m_attr;
                                md->phys_addr = m_start;
                                md->num_pages = (m_end - m_start + 1) >>
                                        EFI_PAGE_SHIFT;
                                /* last part */
-                               new += memmap.desc_size;
-                               memcpy(new, old, memmap.desc_size);
+                               new += efi.memmap.desc_size;
+                               memcpy(new, old, efi.memmap.desc_size);
                                md = new;
                                md->phys_addr = m_end + 1;
                                md->num_pages = (end - m_end) >>
@@ -169,8 +168,8 @@ void __init efi_fake_memmap(void)
                                md->num_pages = (m_start - md->phys_addr) >>
                                        EFI_PAGE_SHIFT;
                                /* latter part */
-                               new += memmap.desc_size;
-                               memcpy(new, old, memmap.desc_size);
+                               new += efi.memmap.desc_size;
+                               memcpy(new, old, efi.memmap.desc_size);
                                md = new;
                                md->phys_addr = m_start;
                                md->num_pages = (end - md->phys_addr + 1) >>
@@ -182,10 +181,10 @@ void __init efi_fake_memmap(void)
 
        /* swap into new EFI memmap */
        efi_unmap_memmap();
-       memmap.map = new_memmap;
-       memmap.phys_map = new_memmap_phy;
-       memmap.nr_map = new_nr_map;
-       memmap.map_end = memmap.map + memmap.nr_map * memmap.desc_size;
+       efi.memmap.map = new_memmap;
+       efi.memmap.phys_map = new_memmap_phy;
+       efi.memmap.nr_map = new_nr_map;
+       efi.memmap.map_end = efi.memmap.map + efi.memmap.nr_map * efi.memmap.desc_size;
        set_bit(EFI_MEMMAP, &efi.flags);
 
        /* print new EFI memmap */
index da99bbb74aebde62fc62128bed5d4ba2ee801212..c06945160a4154a5f8d518192b7adb16c4ab325d 100644 (file)
@@ -28,7 +28,7 @@ OBJECT_FILES_NON_STANDARD     := y
 # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
 KCOV_INSTRUMENT                        := n
 
-lib-y                          := efi-stub-helper.o
+lib-y                          := efi-stub-helper.o gop.o
 
 # include the stub's generic dependencies from lib/ when building for ARM/arm64
 arm-deps := fdt_rw.c fdt_ro.c fdt_wip.c fdt.c fdt_empty_tree.c fdt_sw.c sort.c
index 414deb85c2e5214d4dc3a4cb85f4f8a20eb2df05..993aa56755f60afb929f90521485fd5fab335583 100644 (file)
 
 bool __nokaslr;
 
-static int efi_secureboot_enabled(efi_system_table_t *sys_table_arg)
+static int efi_get_secureboot(efi_system_table_t *sys_table_arg)
 {
-       static efi_guid_t const var_guid = EFI_GLOBAL_VARIABLE_GUID;
-       static efi_char16_t const var_name[] = {
+       static efi_char16_t const sb_var_name[] = {
                'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 };
+       static efi_char16_t const sm_var_name[] = {
+               'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 };
 
+       efi_guid_t var_guid = EFI_GLOBAL_VARIABLE_GUID;
        efi_get_variable_t *f_getvar = sys_table_arg->runtime->get_variable;
-       unsigned long size = sizeof(u8);
-       efi_status_t status;
        u8 val;
+       unsigned long size = sizeof(val);
+       efi_status_t status;
 
-       status = f_getvar((efi_char16_t *)var_name, (efi_guid_t *)&var_guid,
+       status = f_getvar((efi_char16_t *)sb_var_name, (efi_guid_t *)&var_guid,
                          NULL, &size, &val);
 
+       if (status != EFI_SUCCESS)
+               goto out_efi_err;
+
+       if (val == 0)
+               return 0;
+
+       status = f_getvar((efi_char16_t *)sm_var_name, (efi_guid_t *)&var_guid,
+                         NULL, &size, &val);
+
+       if (status != EFI_SUCCESS)
+               goto out_efi_err;
+
+       if (val == 1)
+               return 0;
+
+       return 1;
+
+out_efi_err:
        switch (status) {
-       case EFI_SUCCESS:
-               return val;
        case EFI_NOT_FOUND:
                return 0;
+       case EFI_DEVICE_ERROR:
+               return -EIO;
+       case EFI_SECURITY_VIOLATION:
+               return -EACCES;
        default:
-               return 1;
+               return -EINVAL;
        }
 }
 
@@ -147,6 +169,25 @@ void efi_char16_printk(efi_system_table_t *sys_table_arg,
        out->output_string(out, str);
 }
 
+static struct screen_info *setup_graphics(efi_system_table_t *sys_table_arg)
+{
+       efi_guid_t gop_proto = EFI_GRAPHICS_OUTPUT_PROTOCOL_GUID;
+       efi_status_t status;
+       unsigned long size;
+       void **gop_handle = NULL;
+       struct screen_info *si = NULL;
+
+       size = 0;
+       status = efi_call_early(locate_handle, EFI_LOCATE_BY_PROTOCOL,
+                               &gop_proto, NULL, &size, gop_handle);
+       if (status == EFI_BUFFER_TOO_SMALL) {
+               si = alloc_screen_info(sys_table_arg);
+               if (!si)
+                       return NULL;
+               efi_setup_gop(sys_table_arg, si, &gop_proto, size);
+       }
+       return si;
+}
 
 /*
  * This function handles the architecture specific differences between arm and
@@ -185,6 +226,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        efi_guid_t loaded_image_proto = LOADED_IMAGE_PROTOCOL_GUID;
        unsigned long reserve_addr = 0;
        unsigned long reserve_size = 0;
+       int secure_boot = 0;
+       struct screen_info *si;
 
        /* Check if we were booted by the EFI firmware */
        if (sys_table->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE)
@@ -237,6 +280,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
                        __nokaslr = true;
        }
 
+       si = setup_graphics(sys_table);
+
        status = handle_kernel_image(sys_table, image_addr, &image_size,
                                     &reserve_addr,
                                     &reserve_size,
@@ -250,12 +295,21 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table,
        if (status != EFI_SUCCESS)
                pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
 
+       secure_boot = efi_get_secureboot(sys_table);
+       if (secure_boot > 0)
+               pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+
+       if (secure_boot < 0) {
+               pr_efi_err(sys_table,
+                       "could not determine UEFI Secure Boot status.\n");
+       }
+
        /*
         * Unauthenticated device tree data is a security hazard, so
         * ignore 'dtb=' unless UEFI Secure Boot is disabled.
         */
-       if (efi_secureboot_enabled(sys_table)) {
-               pr_efi(sys_table, "UEFI Secure Boot is enabled.\n");
+       if (secure_boot != 0 && strstr(cmdline_ptr, "dtb=")) {
+               pr_efi(sys_table, "Ignoring DTB from command line.\n");
        } else {
                status = handle_cmdline_files(sys_table, image, cmdline_ptr,
                                              "dtb=",
@@ -309,6 +363,7 @@ fail_free_image:
        efi_free(sys_table, image_size, *image_addr);
        efi_free(sys_table, reserve_size, reserve_addr);
 fail_free_cmdline:
+       free_screen_info(sys_table, si);
        efi_free(sys_table, cmdline_size, (unsigned long)cmdline_ptr);
 fail:
        return EFI_ERROR;
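The Secure Boot determination above (SecureBoot set to 1 while SetupMode is not 1) can be mirrored after boot from user space via efivarfs, where each variable file carries four attribute bytes followed by its data. A hedged sketch, assuming efivarfs is mounted at its usual location:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* efivarfs layout: 4 bytes of attributes followed by the variable data */
static int read_efi_var_byte(const char *path)
{
	unsigned char buf[5];
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (read(fd, buf, sizeof(buf)) != sizeof(buf)) {
		close(fd);
		return -1;
	}
	close(fd);
	return buf[4];
}

int main(void)
{
	int sb = read_efi_var_byte("/sys/firmware/efi/efivars/"
			"SecureBoot-8be4df61-93ca-11d2-aa0d-00e098032b8c");
	int sm = read_efi_var_byte("/sys/firmware/efi/efivars/"
			"SetupMode-8be4df61-93ca-11d2-aa0d-00e098032b8c");

	printf("UEFI Secure Boot is %s\n",
	       (sb == 1 && sm == 0) ? "enabled" : "disabled or undetermined");
	return 0;
}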
index 6f42be4d0084f1befcceb329758513b5f2c50781..e1f0b28e1dcbbf9cde9a1e882ba6212fb165c55e 100644 (file)
@@ -26,6 +26,43 @@ efi_status_t check_platform_features(efi_system_table_t *sys_table_arg)
        return EFI_SUCCESS;
 }
 
+static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
+
+struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg)
+{
+       struct screen_info *si;
+       efi_status_t status;
+
+       /*
+        * Unlike on arm64, where we can directly fill out the screen_info
+        * structure from the stub, we need to allocate a buffer to hold
+        * its contents while we hand over to the kernel proper from the
+        * decompressor.
+        */
+       status = efi_call_early(allocate_pool, EFI_RUNTIME_SERVICES_DATA,
+                               sizeof(*si), (void **)&si);
+
+       if (status != EFI_SUCCESS)
+               return NULL;
+
+       status = efi_call_early(install_configuration_table,
+                               &screen_info_guid, si);
+       if (status == EFI_SUCCESS)
+               return si;
+
+       efi_call_early(free_pool, si);
+       return NULL;
+}
+
+void free_screen_info(efi_system_table_t *sys_table_arg, struct screen_info *si)
+{
+       if (!si)
+               return;
+
+       efi_call_early(install_configuration_table, &screen_info_guid, NULL);
+       efi_call_early(free_pool, si);
+}
+
 efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
                                 unsigned long *image_addr,
                                 unsigned long *image_size,
index 29ed2f9b218ca9892bfcc72da2d91ba4750f4c97..3bd127f953151dff89d0563db13deb12046e4315 100644 (file)
@@ -125,10 +125,12 @@ unsigned long get_dram_base(efi_system_table_t *sys_table_arg)
 
        map.map_end = map.map + map_size;
 
-       for_each_efi_memory_desc(&map, md)
-               if (md->attribute & EFI_MEMORY_WB)
+       for_each_efi_memory_desc_in_map(&map, md) {
+               if (md->attribute & EFI_MEMORY_WB) {
                        if (membase > md->phys_addr)
                                membase = md->phys_addr;
+               }
+       }
 
        efi_call_early(free_pool, map.map);
 
diff --git a/drivers/firmware/efi/libstub/gop.c b/drivers/firmware/efi/libstub/gop.c
new file mode 100644 (file)
index 0000000..932742e
--- /dev/null
@@ -0,0 +1,354 @@
+/* -----------------------------------------------------------------------
+ *
+ *   Copyright 2011 Intel Corporation; author Matt Fleming
+ *
+ *   This file is part of the Linux kernel, and is made available under
+ *   the terms of the GNU General Public License version 2.
+ *
+ * ----------------------------------------------------------------------- */
+
+#include <linux/efi.h>
+#include <linux/screen_info.h>
+#include <asm/efi.h>
+#include <asm/setup.h>
+
+static void find_bits(unsigned long mask, u8 *pos, u8 *size)
+{
+       u8 first, len;
+
+       first = 0;
+       len = 0;
+
+       if (mask) {
+               while (!(mask & 0x1)) {
+                       mask = mask >> 1;
+                       first++;
+               }
+
+               while (mask & 0x1) {
+                       mask = mask >> 1;
+                       len++;
+               }
+       }
+
+       *pos = first;
+       *size = len;
+}
+
+static void
+setup_pixel_info(struct screen_info *si, u32 pixels_per_scan_line,
+                struct efi_pixel_bitmask pixel_info, int pixel_format)
+{
+       if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) {
+               si->lfb_depth = 32;
+               si->lfb_linelength = pixels_per_scan_line * 4;
+               si->red_size = 8;
+               si->red_pos = 0;
+               si->green_size = 8;
+               si->green_pos = 8;
+               si->blue_size = 8;
+               si->blue_pos = 16;
+               si->rsvd_size = 8;
+               si->rsvd_pos = 24;
+       } else if (pixel_format == PIXEL_BGR_RESERVED_8BIT_PER_COLOR) {
+               si->lfb_depth = 32;
+               si->lfb_linelength = pixels_per_scan_line * 4;
+               si->red_size = 8;
+               si->red_pos = 16;
+               si->green_size = 8;
+               si->green_pos = 8;
+               si->blue_size = 8;
+               si->blue_pos = 0;
+               si->rsvd_size = 8;
+               si->rsvd_pos = 24;
+       } else if (pixel_format == PIXEL_BIT_MASK) {
+               find_bits(pixel_info.red_mask, &si->red_pos, &si->red_size);
+               find_bits(pixel_info.green_mask, &si->green_pos,
+                         &si->green_size);
+               find_bits(pixel_info.blue_mask, &si->blue_pos, &si->blue_size);
+               find_bits(pixel_info.reserved_mask, &si->rsvd_pos,
+                         &si->rsvd_size);
+               si->lfb_depth = si->red_size + si->green_size +
+                       si->blue_size + si->rsvd_size;
+               si->lfb_linelength = (pixels_per_scan_line * si->lfb_depth) / 8;
+       } else {
+               si->lfb_depth = 4;
+               si->lfb_linelength = si->lfb_width / 2;
+               si->red_size = 0;
+               si->red_pos = 0;
+               si->green_size = 0;
+               si->green_pos = 0;
+               si->blue_size = 0;
+               si->blue_pos = 0;
+               si->rsvd_size = 0;
+               si->rsvd_pos = 0;
+       }
+}
+
+static efi_status_t
+__gop_query32(efi_system_table_t *sys_table_arg,
+             struct efi_graphics_output_protocol_32 *gop32,
+             struct efi_graphics_output_mode_info **info,
+             unsigned long *size, u64 *fb_base)
+{
+       struct efi_graphics_output_protocol_mode_32 *mode;
+       efi_graphics_output_protocol_query_mode query_mode;
+       efi_status_t status;
+       unsigned long m;
+
+       m = gop32->mode;
+       mode = (struct efi_graphics_output_protocol_mode_32 *)m;
+       query_mode = (void *)(unsigned long)gop32->query_mode;
+
+       status = __efi_call_early(query_mode, (void *)gop32, mode->mode, size,
+                                 info);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       *fb_base = mode->frame_buffer_base;
+       return status;
+}
+
+static efi_status_t
+setup_gop32(efi_system_table_t *sys_table_arg, struct screen_info *si,
+            efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+       struct efi_graphics_output_protocol_32 *gop32, *first_gop;
+       unsigned long nr_gops;
+       u16 width, height;
+       u32 pixels_per_scan_line;
+       u32 ext_lfb_base;
+       u64 fb_base;
+       struct efi_pixel_bitmask pixel_info;
+       int pixel_format;
+       efi_status_t status = EFI_NOT_FOUND;
+       u32 *handles = (u32 *)(unsigned long)gop_handle;
+       int i;
+
+       first_gop = NULL;
+       gop32 = NULL;
+
+       nr_gops = size / sizeof(u32);
+       for (i = 0; i < nr_gops; i++) {
+               struct efi_graphics_output_mode_info *info = NULL;
+               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+               bool conout_found = false;
+               void *dummy = NULL;
+               efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+               u64 current_fb_base;
+
+               status = efi_call_early(handle_protocol, h,
+                                       proto, (void **)&gop32);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               status = efi_call_early(handle_protocol, h,
+                                       &conout_proto, &dummy);
+               if (status == EFI_SUCCESS)
+                       conout_found = true;
+
+               status = __gop_query32(sys_table_arg, gop32, &info, &size,
+                                      &current_fb_base);
+               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+                       /*
+                        * Systems that use the UEFI Console Splitter may
+                        * provide multiple GOP devices, not all of which are
+                        * backed by real hardware. The workaround is to search
+                        * for a GOP implementing the ConOut protocol, and if
+                        * one isn't found, to just fall back to the first GOP.
+                        */
+                       width = info->horizontal_resolution;
+                       height = info->vertical_resolution;
+                       pixel_format = info->pixel_format;
+                       pixel_info = info->pixel_information;
+                       pixels_per_scan_line = info->pixels_per_scan_line;
+                       fb_base = current_fb_base;
+
+                       /*
+                        * Once we've found a GOP supporting ConOut,
+                        * don't bother looking any further.
+                        */
+                       first_gop = gop32;
+                       if (conout_found)
+                               break;
+               }
+       }
+
+       /* Did we find any GOPs? */
+       if (!first_gop)
+               goto out;
+
+       /* EFI framebuffer */
+       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+       si->lfb_width = width;
+       si->lfb_height = height;
+       si->lfb_base = fb_base;
+
+       ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+       if (ext_lfb_base) {
+               si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+               si->ext_lfb_base = ext_lfb_base;
+       }
+
+       si->pages = 1;
+
+       setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+       si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+       si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+       return status;
+}
+
+static efi_status_t
+__gop_query64(efi_system_table_t *sys_table_arg,
+             struct efi_graphics_output_protocol_64 *gop64,
+             struct efi_graphics_output_mode_info **info,
+             unsigned long *size, u64 *fb_base)
+{
+       struct efi_graphics_output_protocol_mode_64 *mode;
+       efi_graphics_output_protocol_query_mode query_mode;
+       efi_status_t status;
+       unsigned long m;
+
+       m = gop64->mode;
+       mode = (struct efi_graphics_output_protocol_mode_64 *)m;
+       query_mode = (void *)(unsigned long)gop64->query_mode;
+
+       status = __efi_call_early(query_mode, (void *)gop64, mode->mode, size,
+                                 info);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       *fb_base = mode->frame_buffer_base;
+       return status;
+}
+
+static efi_status_t
+setup_gop64(efi_system_table_t *sys_table_arg, struct screen_info *si,
+           efi_guid_t *proto, unsigned long size, void **gop_handle)
+{
+       struct efi_graphics_output_protocol_64 *gop64, *first_gop;
+       unsigned long nr_gops;
+       u16 width, height;
+       u32 pixels_per_scan_line;
+       u32 ext_lfb_base;
+       u64 fb_base;
+       struct efi_pixel_bitmask pixel_info;
+       int pixel_format;
+       efi_status_t status = EFI_NOT_FOUND;
+       u64 *handles = (u64 *)(unsigned long)gop_handle;
+       int i;
+
+       first_gop = NULL;
+       gop64 = NULL;
+
+       nr_gops = size / sizeof(u64);
+       for (i = 0; i < nr_gops; i++) {
+               struct efi_graphics_output_mode_info *info = NULL;
+               efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID;
+               bool conout_found = false;
+               void *dummy = NULL;
+               efi_handle_t h = (efi_handle_t)(unsigned long)handles[i];
+               u64 current_fb_base;
+
+               status = efi_call_early(handle_protocol, h,
+                                       proto, (void **)&gop64);
+               if (status != EFI_SUCCESS)
+                       continue;
+
+               status = efi_call_early(handle_protocol, h,
+                                       &conout_proto, &dummy);
+               if (status == EFI_SUCCESS)
+                       conout_found = true;
+
+               status = __gop_query64(sys_table_arg, gop64, &info, &size,
+                                      &current_fb_base);
+               if (status == EFI_SUCCESS && (!first_gop || conout_found)) {
+                       /*
+                        * Systems that use the UEFI Console Splitter may
+                        * provide multiple GOP devices, not all of which are
+                        * backed by real hardware. The workaround is to search
+                        * for a GOP implementing the ConOut protocol, and if
+                        * one isn't found, to just fall back to the first GOP.
+                        */
+                       width = info->horizontal_resolution;
+                       height = info->vertical_resolution;
+                       pixel_format = info->pixel_format;
+                       pixel_info = info->pixel_information;
+                       pixels_per_scan_line = info->pixels_per_scan_line;
+                       fb_base = current_fb_base;
+
+                       /*
+                        * Once we've found a GOP supporting ConOut,
+                        * don't bother looking any further.
+                        */
+                       first_gop = gop64;
+                       if (conout_found)
+                               break;
+               }
+       }
+
+       /* Did we find any GOPs? */
+       if (!first_gop)
+               goto out;
+
+       /* EFI framebuffer */
+       si->orig_video_isVGA = VIDEO_TYPE_EFI;
+
+       si->lfb_width = width;
+       si->lfb_height = height;
+       si->lfb_base = fb_base;
+
+       ext_lfb_base = (u64)(unsigned long)fb_base >> 32;
+       if (ext_lfb_base) {
+               si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+               si->ext_lfb_base = ext_lfb_base;
+       }
+
+       si->pages = 1;
+
+       setup_pixel_info(si, pixels_per_scan_line, pixel_info, pixel_format);
+
+       si->lfb_size = si->lfb_linelength * si->lfb_height;
+
+       si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS;
+out:
+       return status;
+}
+
+/*
+ * See if we have Graphics Output Protocol
+ */
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+                          struct screen_info *si, efi_guid_t *proto,
+                          unsigned long size)
+{
+       efi_status_t status;
+       void **gop_handle = NULL;
+
+       status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
+                               size, (void **)&gop_handle);
+       if (status != EFI_SUCCESS)
+               return status;
+
+       status = efi_call_early(locate_handle,
+                               EFI_LOCATE_BY_PROTOCOL,
+                               proto, NULL, &size, gop_handle);
+       if (status != EFI_SUCCESS)
+               goto free_handle;
+
+       if (efi_is_64bit()) {
+               status = setup_gop64(sys_table_arg, si, proto, size,
+                                    gop_handle);
+       } else {
+               status = setup_gop32(sys_table_arg, si, proto, size,
+                                    gop_handle);
+       }
+
+free_handle:
+       efi_call_early(free_pool, gop_handle);
+       return status;
+}
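
Both setup_gop32() and setup_gop64() above split the 64-bit framebuffer base across si->lfb_base and si->ext_lfb_base and flag the split with VIDEO_CAPABILITY_64BIT_BASE. A consumer of struct screen_info can put the address back together along these lines (a minimal sketch, not part of this patch; the helper name is invented):

	/* Illustrative only: recover the full framebuffer base address. */
	static u64 screen_info_lfb_base(const struct screen_info *si)
	{
		u64 base = si->lfb_base;

		if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
			base |= (u64)si->ext_lfb_base << 32;

		return base;
	}
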
diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
new file mode 100644 (file)
index 0000000..236004b
--- /dev/null
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2016 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)    "efi: memattr: " fmt
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+
+#include <asm/early_ioremap.h>
+
+static int __initdata tbl_size;
+
+/*
+ * Reserve the memory associated with the Memory Attributes configuration
+ * table, if it exists.
+ */
+int __init efi_memattr_init(void)
+{
+       efi_memory_attributes_table_t *tbl;
+
+       if (efi.mem_attr_table == EFI_INVALID_TABLE_ADDR)
+               return 0;
+
+       tbl = early_memremap(efi.mem_attr_table, sizeof(*tbl));
+       if (!tbl) {
+               pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+                      efi.mem_attr_table);
+               return -ENOMEM;
+       }
+
+       if (tbl->version > 1) {
+               pr_warn("Unexpected EFI Memory Attributes table version %d\n",
+                       tbl->version);
+               goto unmap;
+       }
+
+       tbl_size = sizeof(*tbl) + tbl->num_entries * tbl->desc_size;
+       memblock_reserve(efi.mem_attr_table, tbl_size);
+
+unmap:
+       early_memunmap(tbl, sizeof(*tbl));
+       return 0;
+}
+
+/*
+ * Returns a copy @out of the UEFI memory descriptor @in if it is covered
+ * entirely by a UEFI memory map entry with matching attributes. The virtual
+ * address of @out is set according to the matching entry that was found.
+ */
+static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
+{
+       u64 in_paddr = in->phys_addr;
+       u64 in_size = in->num_pages << EFI_PAGE_SHIFT;
+       efi_memory_desc_t *md;
+
+       *out = *in;
+
+       if (in->type != EFI_RUNTIME_SERVICES_CODE &&
+           in->type != EFI_RUNTIME_SERVICES_DATA) {
+               pr_warn("Entry type should be RuntimeServiceCode/Data\n");
+               return false;
+       }
+
+       if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
+               pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
+               return false;
+       }
+
+       if (PAGE_SIZE > EFI_PAGE_SIZE &&
+           (!PAGE_ALIGNED(in->phys_addr) ||
+            !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {
+               /*
+                * Since arm64 may execute with page sizes of up to 64 KB, the
+                * UEFI spec mandates that RuntimeServices memory regions must
+                * be 64 KB aligned. We need to validate this here since we will
+                * not be able to tighten permissions on such regions without
+                * affecting adjacent regions.
+                */
+               pr_warn("Entry address region misaligned\n");
+               return false;
+       }
+
+       for_each_efi_memory_desc(md) {
+               u64 md_paddr = md->phys_addr;
+               u64 md_size = md->num_pages << EFI_PAGE_SHIFT;
+
+               if (!(md->attribute & EFI_MEMORY_RUNTIME))
+                       continue;
+               if (md->virt_addr == 0) {
+                       /* no virtual mapping has been installed by the stub */
+                       break;
+               }
+
+               if (md_paddr > in_paddr || (in_paddr - md_paddr) >= md_size)
+                       continue;
+
+               /*
+                * This entry covers the start of @in, check whether
+                * it covers the end as well.
+                */
+               if (md_paddr + md_size < in_paddr + in_size) {
+                       pr_warn("Entry covers multiple EFI memory map regions\n");
+                       return false;
+               }
+
+               if (md->type != in->type) {
+                       pr_warn("Entry type deviates from EFI memory map region type\n");
+                       return false;
+               }
+
+               out->virt_addr = in_paddr + (md->virt_addr - md_paddr);
+
+               return true;
+       }
+
+       pr_warn("No matching entry found in the EFI memory map\n");
+       return false;
+}
+
+/*
+ * To be called after the EFI page tables have been populated. If a memory
+ * attributes table is available, its contents will be used to update the
+ * mappings with tightened permissions as described by the table.
+ * This requires the UEFI memory map to have already been populated with
+ * virtual addresses.
+ */
+int __init efi_memattr_apply_permissions(struct mm_struct *mm,
+                                        efi_memattr_perm_setter fn)
+{
+       efi_memory_attributes_table_t *tbl;
+       int i, ret;
+
+       if (tbl_size <= sizeof(*tbl))
+               return 0;
+
+       /*
+        * We need the EFI memory map to be set up so we can use it to
+        * look up the virtual addresses of all entries in the EFI
+        * Memory Attributes table. If it isn't available, this
+        * function should not be called.
+        */
+       if (WARN_ON(!efi_enabled(EFI_MEMMAP)))
+               return 0;
+
+       tbl = memremap(efi.mem_attr_table, tbl_size, MEMREMAP_WB);
+       if (!tbl) {
+               pr_err("Failed to map EFI Memory Attributes table @ 0x%lx\n",
+                      efi.mem_attr_table);
+               return -ENOMEM;
+       }
+
+       if (efi_enabled(EFI_DBG))
+               pr_info("Processing EFI Memory Attributes table:\n");
+
+       for (i = ret = 0; ret == 0 && i < tbl->num_entries; i++) {
+               efi_memory_desc_t md;
+               unsigned long size;
+               bool valid;
+               char buf[64];
+
+               valid = entry_is_valid((void *)tbl->entry + i * tbl->desc_size,
+                                      &md);
+               size = md.num_pages << EFI_PAGE_SHIFT;
+               if (efi_enabled(EFI_DBG) || !valid)
+                       pr_info("%s 0x%012llx-0x%012llx %s\n",
+                               valid ? "" : "!", md.phys_addr,
+                               md.phys_addr + size - 1,
+                               efi_md_typeattr_format(buf, sizeof(buf), &md));
+
+               if (valid)
+                       ret = fn(mm, &md);
+       }
+       memunmap(tbl);
+       return ret;
+}
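
The fn callback invoked above has type efi_memattr_perm_setter and is where the architecture applies the tightened permissions to its own EFI page tables. A hypothetical sketch of such a callback follows; set_permissions_in_mm() is a placeholder for whatever page-table helper the architecture actually provides, not a real kernel function:

	/* Hypothetical efi_memattr_perm_setter; not part of this patch. */
	static int __init efi_update_runtime_perms(struct mm_struct *mm,
						   efi_memory_desc_t *md)
	{
		bool ro = md->attribute & EFI_MEMORY_RO;
		bool xp = md->attribute & EFI_MEMORY_XP;

		/* md->virt_addr was filled in by entry_is_valid() above. */
		return set_permissions_in_mm(mm, md->virt_addr,
					     md->num_pages << EFI_PAGE_SHIFT,
					     ro, xp);
	}

An architecture would pass a callback like this, together with the mm that holds its EFI runtime mappings, to efi_memattr_apply_permissions() once those page tables have been populated.
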
index 9c59d1c795d1f2d6787b9a3a248f384e4017e8f1..62ead9b9d871a606af7c0b575f382377f7f82aaa 100644 (file)
@@ -9,7 +9,8 @@ int efi_reboot_quirk_mode = -1;
 
 void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
 {
-       int efi_mode;
+       const char *str[] = { "cold", "warm", "shutdown", "platform" };
+       int efi_mode, cap_reset_mode;
 
        if (!efi_enabled(EFI_RUNTIME_SERVICES))
                return;
@@ -30,6 +31,15 @@ void efi_reboot(enum reboot_mode reboot_mode, const char *__unused)
        if (efi_reboot_quirk_mode != -1)
                efi_mode = efi_reboot_quirk_mode;
 
+       if (efi_capsule_pending(&cap_reset_mode)) {
+               if (efi_mode != cap_reset_mode)
+                       printk(KERN_CRIT "efi: %s reset requested but pending "
+                              "capsule update requires %s reset... Performing "
+                              "%s reset.\n", str[efi_mode], str[cap_reset_mode],
+                              str[cap_reset_mode]);
+               efi_mode = cap_reset_mode;
+       }
+
        efi.reset_system(efi_mode, EFI_SUCCESS, 0, NULL);
 }
 
index de6953039af65255b1afe36d059fb2893fdcd6f9..23bef6bb73ee58db932e7fd34100e4d850e73a4a 100644 (file)
 
 #include <linux/bug.h>
 #include <linux/efi.h>
+#include <linux/irqflags.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
+#include <linux/stringify.h>
 #include <asm/efi.h>
 
+static void efi_call_virt_check_flags(unsigned long flags, const char *call)
+{
+       unsigned long cur_flags, mismatch;
+
+       local_save_flags(cur_flags);
+
+       mismatch = flags ^ cur_flags;
+       if (!WARN_ON_ONCE(mismatch & ARCH_EFI_IRQ_FLAGS_MASK))
+               return;
+
+       add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_NOW_UNRELIABLE);
+       pr_err_ratelimited(FW_BUG "IRQ flags corrupted (0x%08lx=>0x%08lx) by EFI %s\n",
+                          flags, cur_flags, call);
+       local_irq_restore(flags);
+}
+
+/*
+ * Arch code can implement the following three template macros, avoiding
+ * repetition for the void/non-void return cases of {__,}efi_call_virt:
+ *
+ *  * arch_efi_call_virt_setup
+ *
+ *    Sets up the environment for the call (e.g. switching page tables,
+ *    allowing kernel-mode use of floating point, if required).
+ *
+ *  * arch_efi_call_virt
+ *
+ *    Performs the call. The last expression in the macro must be the call
+ *    itself, allowing the logic to be shared by the void and non-void
+ *    cases.
+ *
+ *  * arch_efi_call_virt_teardown
+ *
+ *    Restores the usual kernel environment once the call has returned.
+ */
+
+#define efi_call_virt(f, args...)                                      \
+({                                                                     \
+       efi_status_t __s;                                               \
+       unsigned long flags;                                            \
+       arch_efi_call_virt_setup();                                     \
+       local_save_flags(flags);                                        \
+       __s = arch_efi_call_virt(f, args);                              \
+       efi_call_virt_check_flags(flags, __stringify(f));               \
+       arch_efi_call_virt_teardown();                                  \
+       __s;                                                            \
+})
+
+#define __efi_call_virt(f, args...)                                    \
+({                                                                     \
+       unsigned long flags;                                            \
+       arch_efi_call_virt_setup();                                     \
+       local_save_flags(flags);                                        \
+       arch_efi_call_virt(f, args);                                    \
+       efi_call_virt_check_flags(flags, __stringify(f));               \
+       arch_efi_call_virt_teardown();                                  \
+})
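
For reference, an architecture might wire up the three template macros described in the comment above roughly as follows; every name other than the three arch_efi_call_virt* hooks is a placeholder invented for this sketch:

	/* Hypothetical arch glue; the my_arch_* helpers are placeholders. */
	#define arch_efi_call_virt_setup()	my_arch_enter_efi_context()

	/* The last (and only) expression is the call itself, as required. */
	#define arch_efi_call_virt(f, args...)	my_arch_runtime_table()->f(args)

	#define arch_efi_call_virt_teardown()	my_arch_leave_efi_context()
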
+
 /*
  * According to section 7.1 of the UEFI spec, Runtime Services are not fully
  * reentrant, and there are particular combinations of calls that need to be
index 34b741940494a24682e6cbf0e96d3246468a4fd6..d3b75138328665f0be09e2192199831473499502 100644 (file)
@@ -329,39 +329,6 @@ check_var_size_nonblocking(u32 attributes, unsigned long size)
        return fops->query_variable_store(attributes, size, true);
 }
 
-static int efi_status_to_err(efi_status_t status)
-{
-       int err;
-
-       switch (status) {
-       case EFI_SUCCESS:
-               err = 0;
-               break;
-       case EFI_INVALID_PARAMETER:
-               err = -EINVAL;
-               break;
-       case EFI_OUT_OF_RESOURCES:
-               err = -ENOSPC;
-               break;
-       case EFI_DEVICE_ERROR:
-               err = -EIO;
-               break;
-       case EFI_WRITE_PROTECTED:
-               err = -EROFS;
-               break;
-       case EFI_SECURITY_VIOLATION:
-               err = -EACCES;
-               break;
-       case EFI_NOT_FOUND:
-               err = -ENOENT;
-               break;
-       default:
-               err = -EINVAL;
-       }
-
-       return err;
-}
-
 static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor,
                                struct list_head *head)
 {
@@ -452,8 +419,7 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
  * Returns 0 on success, or a kernel error code on failure.
  */
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
-               void *data, bool atomic, bool duplicates,
-               struct list_head *head)
+               void *data, bool duplicates, struct list_head *head)
 {
        const struct efivar_operations *ops = __efivars->ops;
        unsigned long variable_name_size = 1024;
@@ -483,7 +449,7 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
                                                &vendor_guid);
                switch (status) {
                case EFI_SUCCESS:
-                       if (!atomic)
+                       if (duplicates)
                                spin_unlock_irq(&__efivars->lock);
 
                        variable_name_size = var_name_strnsize(variable_name,
@@ -498,21 +464,19 @@ int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
                         * and may end up looping here forever.
                         */
                        if (duplicates &&
-                           variable_is_present(variable_name, &vendor_guid, head)) {
+                           variable_is_present(variable_name, &vendor_guid,
+                                               head)) {
                                dup_variable_bug(variable_name, &vendor_guid,
                                                 variable_name_size);
-                               if (!atomic)
-                                       spin_lock_irq(&__efivars->lock);
-
                                status = EFI_NOT_FOUND;
-                               break;
+                       } else {
+                               err = func(variable_name, vendor_guid,
+                                          variable_name_size, data);
+                               if (err)
+                                       status = EFI_NOT_FOUND;
                        }
 
-                       err = func(variable_name, vendor_guid, variable_name_size, data);
-                       if (err)
-                               status = EFI_NOT_FOUND;
-
-                       if (!atomic)
+                       if (duplicates)
                                spin_lock_irq(&__efivars->lock);
 
                        break;
index 815c4a5cae543e228a634eae28241bc6f24ac1ed..1b95475b6aefefd5144bca29ab464304826c6377 100644 (file)
@@ -77,7 +77,7 @@ static inline u16 fw_cfg_sel_endianness(u16 key)
 static inline void fw_cfg_read_blob(u16 key,
                                    void *buf, loff_t pos, size_t count)
 {
-       u32 glk;
+       u32 glk = -1U;
        acpi_status status;
 
        /* If we have ACPI, ensure mutual exclusion against any potential
index d9ab0cd1d205963528d7022d66213c26bf8304e5..4d9a315cfd43ea8fb9d97b0578f9c6bd4c38ef00 100644 (file)
@@ -196,44 +196,6 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
        return 0;
 }
 
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-       int error;
-
-       error = pm_runtime_get_sync(&p->pdev->dev);
-       if (error < 0)
-               return error;
-
-       return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
-       struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
-       struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
-       pm_runtime_put(&p->pdev->dev);
-}
-
 static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
 {
        struct gpio_rcar_priv *p = dev_id;
@@ -280,32 +242,18 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
 
 static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
 {
-       struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-       int error;
-
-       error = pm_runtime_get_sync(&p->pdev->dev);
-       if (error < 0)
-               return error;
-
-       error = pinctrl_request_gpio(chip->base + offset);
-       if (error)
-               pm_runtime_put(&p->pdev->dev);
-
-       return error;
+       return pinctrl_request_gpio(chip->base + offset);
 }
 
 static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
 {
-       struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
        pinctrl_free_gpio(chip->base + offset);
 
-       /* Set the GPIO as an input to ensure that the next GPIO request won't
+       /*
+        * Set the GPIO as an input to ensure that the next GPIO request won't
         * drive the GPIO pin as an output.
         */
        gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
-       pm_runtime_put(&p->pdev->dev);
 }
 
 static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
@@ -452,6 +400,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        }
 
        pm_runtime_enable(dev);
+       pm_runtime_get_sync(dev);
 
        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
@@ -488,10 +437,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
        irq_chip->irq_unmask = gpio_rcar_irq_enable;
        irq_chip->irq_set_type = gpio_rcar_irq_set_type;
        irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
-       irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
-       irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
-       irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
-       irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
        irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
 
        ret = gpiochip_add_data(gpio_chip, p);
@@ -522,6 +467,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
 err1:
        gpiochip_remove(gpio_chip);
 err0:
+       pm_runtime_put(dev);
        pm_runtime_disable(dev);
        return ret;
 }
@@ -532,6 +478,7 @@ static int gpio_rcar_remove(struct platform_device *pdev)
 
        gpiochip_remove(&p->gpio_chip);
 
+       pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return 0;
 }
index 682070d20f001dce1aef51147d99aca97cb2938c..2dc52585e3f2f1cc79664beba50ef453dcea1950 100644 (file)
@@ -977,7 +977,7 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
                lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
                if (lookup) {
                        lookup->adev = adev;
-                       lookup->con_id = con_id;
+                       lookup->con_id = kstrdup(con_id, GFP_KERNEL);
                        list_add_tail(&lookup->node, &acpi_crs_lookup_list);
                }
        }
index e557fc1f17c8becfb3ddae209942326d05d5a884..7ecea83ce453cae868306b95b1600a01653a6de7 100644 (file)
@@ -541,6 +541,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
        if (!metadata_size) {
                if (bo->metadata_size) {
                        kfree(bo->metadata);
+                       bo->metadata = NULL;
                        bo->metadata_size = 0;
                }
                return 0;
index bf731e9f643e9ecddd367034179fae6a40d294ce..7f85c2c1d68156a4d91bd4b18332df6f1886fb7c 100644 (file)
@@ -276,8 +276,8 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
                        }
                }
        } else {
-               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-                       for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
                                max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
                                if (max_pix_clock >= pix_clock) {
                                        *dp_lanes = lane_num;
index 1e0bba29e16796f97c8eee38fc9d3100c405f6bc..1cd6de575305aa3f657fa1a7f5a70232b272d739 100644 (file)
@@ -298,6 +298,10 @@ bool amdgpu_atombios_encoder_mode_fixup(struct drm_encoder *encoder,
            && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
                adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+       /* vertical FP must be at least 1 */
+       if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+               adjusted_mode->crtc_vsync_start++;
+
        /* get the native mode for scaling */
        if (amdgpu_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
                amdgpu_panel_mode_fixup(encoder, adjusted_mode);
index a0f1bd711b533910ce00a2bbe642591419c4d198..e3f4c725a1c6910f3431431d9a2dbc538153f025 100644 (file)
@@ -2872,20 +2872,6 @@ static void intel_dp_info(struct seq_file *m,
                intel_panel_info(m, &intel_connector->panel);
 }
 
-static void intel_dp_mst_info(struct seq_file *m,
-                         struct intel_connector *intel_connector)
-{
-       struct intel_encoder *intel_encoder = intel_connector->encoder;
-       struct intel_dp_mst_encoder *intel_mst =
-               enc_to_mst(&intel_encoder->base);
-       struct intel_digital_port *intel_dig_port = intel_mst->primary;
-       struct intel_dp *intel_dp = &intel_dig_port->dp;
-       bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
-                                       intel_connector->port);
-
-       seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
 static void intel_hdmi_info(struct seq_file *m,
                            struct intel_connector *intel_connector)
 {
@@ -2929,8 +2915,6 @@ static void intel_connector_info(struct seq_file *m,
                        intel_hdmi_info(m, intel_connector);
                else if (intel_encoder->type == INTEL_OUTPUT_LVDS)
                        intel_lvds_info(m, intel_connector);
-               else if (intel_encoder->type == INTEL_OUTPUT_DP_MST)
-                       intel_dp_mst_info(m, intel_connector);
        }
 
        seq_printf(m, "\tmodes:\n");
index 30798cbc6fc08938cc2adc62089148d545c118a6..6d2fb3f4ac628fddaaa5f6da8f724c3787330c5b 100644 (file)
@@ -792,7 +792,7 @@ static int i915_drm_resume(struct drm_device *dev)
 static int i915_drm_resume_early(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret = 0;
+       int ret;
 
        /*
         * We have a resume ordering issue with the snd-hda driver also
@@ -803,6 +803,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
         * FIXME: This should be solved with a special hdmi sink device or
         * similar so that power domains can be employed.
         */
+
+       /*
+        * Note that we need to set the power state explicitly, since we
+        * powered off the device during freeze and the PCI core won't power
+        * it back up for us during thaw. Powering off the device during
+        * freeze is not a hard requirement though, and during the
+        * suspend/resume phases the PCI core makes sure we get here with the
+        * device powered on. So in case we change our freeze logic and keep
+        * the device powered we can also remove the following set power state
+        * call.
+        */
+       ret = pci_set_power_state(dev->pdev, PCI_D0);
+       if (ret) {
+               DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+               goto out;
+       }
+
+       /*
+        * Note that pci_enable_device() first enables any parent bridge
+        * device and only then sets the power state for this device. The
+        * bridge enabling is a nop though, since bridge devices are resumed
+        * first. The order of enabling power and enabling the device is
+        * imposed by the PCI core as described above, so here we preserve the
+        * same order for the freeze/thaw phases.
+        *
+        * TODO: eventually we should remove pci_disable_device() /
+        * pci_enable_device() from suspend/resume. Due to how they
+        * depend on the device enable refcount we can't anyway depend on them
+        * disabling/enabling the device.
+        */
        if (pci_enable_device(dev->pdev)) {
                ret = -EIO;
                goto out;
index f76cbf3e5d1e1999afa61b62bf9740c308e552bb..363bd79dea2ef476bdf0f14886248175310720f6 100644 (file)
@@ -2907,7 +2907,14 @@ enum skl_disp_power_wells {
 #define GEN6_RP_STATE_CAP      _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP        _MMIO(0x138170)
 
-#define INTERVAL_1_28_US(us)   (((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (e.g. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)   roundup(((us) * 100) >> 7, 25)
 #define INTERVAL_1_33_US(us)   (((us) * 3)   >> 2)
 #define INTERVAL_0_833_US(us)  (((us) * 6) / 5)
 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \
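
Working through the new INTERVAL_1_28_US() for us = 1000 (a value chosen only for illustration): (1000 * 100) >> 7 is 781, which is what the old macro would have programmed; roundup(781, 25) is 800, so the value written to the hardware is now always a multiple of 25, as the comment above requires.
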
@@ -7437,6 +7444,8 @@ enum skl_disp_power_wells {
 #define  TRANS_CLK_SEL_DISABLED                (0x0<<29)
 #define  TRANS_CLK_SEL_PORT(x)         (((x)+1)<<29)
 
+#define CDCLK_FREQ                     _MMIO(0x46200)
+
 #define _TRANSA_MSA_MISC               0x60410
 #define _TRANSB_MSA_MISC               0x61410
 #define _TRANSC_MSA_MISC               0x62410
index 30f921421b0c944217832ba86856a6904f8fef11..7d281b40064a47f7e46071ef897c4720da38f9d8 100644 (file)
@@ -262,8 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
        tmp |= AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_UPPER_N_MASK;
        tmp &= ~AUD_CONFIG_LOWER_N_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        I915_WRITE(HSW_AUD_CFG(pipe), tmp);
 
@@ -476,8 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
        tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
        tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
        tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
-       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DP_MST))
+       if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
                tmp |= AUD_CONFIG_N_VALUE_INDEX;
        else
                tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -515,8 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
 
        /* ELD Conn_Type */
        connector->eld[5] &= ~(3 << 2);
-       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
-           intel_pipe_has_type(crtc, INTEL_OUTPUT_DP_MST))
+       if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
                connector->eld[5] |= (1 << 2);
 
        connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
index 505fc5cf26f845217b5bd58c8fe0a865bde5170d..0364292367b1425a297cd35e6ae81da2cf18dbc3 100644 (file)
@@ -257,8 +257,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
                pipe_config->has_pch_encoder = true;
 
        /* LPT FDI RX only supports 8bpc. */
-       if (HAS_PCH_LPT(dev))
+       if (HAS_PCH_LPT(dev)) {
+               if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
+                       DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+                       return false;
+               }
+
                pipe_config->pipe_bpp = 24;
+       }
 
        /* FDI must always be 2.7 GHz */
        if (HAS_DDI(dev)) {
index 62de9f4bce09959a8deb756c0e51ecb4e202315a..96ffcc541e17697b69bad69fb24fe5bec3866aab 100644 (file)
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
        } else if (IS_BROADWELL(dev_priv)) {
                ddi_translations_fdi = bdw_ddi_translations_fdi;
                ddi_translations_dp = bdw_ddi_translations_dp;
-               ddi_translations_edp = bdw_ddi_translations_edp;
+
+               if (dev_priv->edp_low_vswing) {
+                       ddi_translations_edp = bdw_ddi_translations_edp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+               } else {
+                       ddi_translations_edp = bdw_ddi_translations_dp;
+                       n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+               }
+
                ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-               n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
                n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
                n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
                hdmi_default_entry = 7;
@@ -3098,23 +3106,6 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
        I915_WRITE(FDI_RX_CTL(PIPE_A), val);
 }
 
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
-                                struct intel_crtc *intel_crtc)
-{
-       u32 temp;
-
-       if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
-               temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-
-               intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-
-               if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
-                       return true;
-       }
-
-       return false;
-}
-
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config)
 {
@@ -3175,8 +3166,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
                break;
        }
 
-       pipe_config->has_audio =
-               intel_ddi_is_audio_enabled(dev_priv, intel_crtc);
+       if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+               temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+               if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
+                       pipe_config->has_audio = true;
+       }
 
        if (encoder->type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp_bpp &&
            pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
@@ -3201,12 +3195,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
        intel_ddi_clock_get(encoder, pipe_config);
 }
 
-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
-       /* HDMI has nothing special to destroy, so we can go with this. */
-       intel_dp_encoder_destroy(encoder);
-}
-
 static bool intel_ddi_compute_config(struct intel_encoder *encoder,
                                     struct intel_crtc_state *pipe_config)
 {
@@ -3225,7 +3213,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
 }
 
 static const struct drm_encoder_funcs intel_ddi_funcs = {
-       .destroy = intel_ddi_destroy,
+       .reset = intel_dp_encoder_reset,
+       .destroy = intel_dp_encoder_destroy,
 };
 
 static struct intel_connector *
@@ -3324,6 +3313,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
        intel_encoder->post_disable = intel_ddi_post_disable;
        intel_encoder->get_hw_state = intel_ddi_get_hw_state;
        intel_encoder->get_config = intel_ddi_get_config;
+       intel_encoder->suspend = intel_dp_encoder_suspend;
 
        intel_dig_port->port = port;
        intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
index 6e0d8283daa66d6383516c28542850d5c7364a48..0104a06d01fd6617f54bedf68353935668eee45b 100644 (file)
@@ -7988,9 +7988,6 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
 
        pipe_config->gmch_pfit.control = tmp;
        pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
-       if (INTEL_INFO(dev)->gen < 5)
-               pipe_config->gmch_pfit.lvds_border_bits =
-                       I915_READ(LVDS) & LVDS_BORDER_ENABLE;
 }
 
 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9752,6 +9749,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
        sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
        mutex_unlock(&dev_priv->rps.hw_lock);
 
+       I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+
        intel_update_cdclk(dev);
 
        WARN(cdclk != dev_priv->cdclk_freq,
@@ -13351,6 +13350,9 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
        }
 
        for_each_crtc_in_state(state, crtc, crtc_state, i) {
+               if (state->legacy_cursor_update)
+                       continue;
+
                ret = intel_crtc_wait_for_pending_flips(crtc);
                if (ret)
                        return ret;
index f069a82deb57a42a814e2e5a30f5f1b1e4c51ad8..412a34c39522fc8474b87eaa02a5532f8a32d859 100644 (file)
@@ -4898,7 +4898,7 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
        kfree(intel_dig_port);
 }
 
-static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
 {
        struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
@@ -4940,7 +4940,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
        edp_panel_vdd_schedule_off(intel_dp);
 }
 
-static void intel_dp_encoder_reset(struct drm_encoder *encoder)
+void intel_dp_encoder_reset(struct drm_encoder *encoder)
 {
        struct intel_dp *intel_dp;
 
index 937e77228466eb22e66ac765d001078a418badfd..2c999725b3d4b3ac10d9599227e00a1b392ae4f2 100644 (file)
@@ -78,8 +78,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
                return false;
        }
 
-       if (drm_dp_mst_port_has_audio(&intel_dp->mst_mgr, found->port))
-               pipe_config->has_audio = true;
        mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp);
 
        pipe_config->pbn = mst_pbn;
@@ -104,11 +102,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
        struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base);
        struct intel_digital_port *intel_dig_port = intel_mst->primary;
        struct intel_dp *intel_dp = &intel_dig_port->dp;
-       struct drm_device *dev = encoder->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_crtc *crtc = encoder->base.crtc;
-       struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
        int ret;
 
        DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
@@ -119,10 +112,6 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder)
        if (ret) {
                DRM_ERROR("failed to update payload %d\n", ret);
        }
-       if (intel_crtc->config->has_audio) {
-               intel_audio_codec_disable(encoder);
-               intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
-       }
 }
 
 static void intel_mst_post_disable_dp(struct intel_encoder *encoder)
@@ -221,7 +210,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
        struct intel_dp *intel_dp = &intel_dig_port->dp;
        struct drm_device *dev = intel_dig_port->base.base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
        enum port port = intel_dig_port->port;
        int ret;
 
@@ -234,13 +222,6 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
        ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
 
        ret = drm_dp_update_payload_part2(&intel_dp->mst_mgr);
-
-       if (crtc->config->has_audio) {
-               DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
-                                pipe_name(crtc->pipe));
-               intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-               intel_audio_codec_enable(encoder);
-       }
 }
 
 static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -266,9 +247,6 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
 
        pipe_config->has_dp_encoder = true;
 
-       pipe_config->has_audio =
-               intel_ddi_is_audio_enabled(dev_priv, crtc);
-
        temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
        if (temp & TRANS_DDI_PHSYNC)
                flags |= DRM_MODE_FLAG_PHSYNC;
index 4c027d69fac92c99d6a67b6a98f98d9f67d84e60..9d0770c23fdece738575cc70a372353b7ea6284e 100644 (file)
@@ -1019,8 +1019,6 @@ void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
 void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp);
 bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
 void intel_ddi_fdi_disable(struct drm_crtc *crtc);
-bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
-                                struct intel_crtc *intel_crtc);
 void intel_ddi_get_config(struct intel_encoder *encoder,
                          struct intel_crtc_state *pipe_config);
 struct intel_encoder *
@@ -1238,6 +1236,8 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
 void intel_dp_start_link_train(struct intel_dp *intel_dp);
 void intel_dp_stop_link_train(struct intel_dp *intel_dp);
 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
+void intel_dp_encoder_reset(struct drm_encoder *encoder);
+void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
 void intel_dp_encoder_destroy(struct drm_encoder *encoder);
 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc);
 bool intel_dp_compute_config(struct intel_encoder *encoder,
index a0d8daed24701cf279ec6a452538eb814e092022..1ab6f687f6408ff9d72956fec2ebee33640c1755 100644 (file)
@@ -1415,8 +1415,16 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
                                hdmi_to_dig_port(intel_hdmi));
        }
 
-       if (!live_status)
-               DRM_DEBUG_KMS("Live status not up!");
+       if (!live_status) {
+               DRM_DEBUG_KMS("HDMI live status down\n");
+               /*
+                * The live status register is not reliable on all Intel
+                * platforms, so consider live_status only for certain
+                * platforms; for the others, read the EDID to determine
+                * the presence of a sink.
+                */
+               if (INTEL_INFO(dev_priv)->gen < 7 || IS_IVYBRIDGE(dev_priv))
+                       live_status = true;
+       }
 
        intel_hdmi_unset_edid(connector);
 
index cd9fe609aefbc2487ce94ab01e0d495dd12d8167..10dc3517b63b32921437bcff3d2352829061c4ed 100644 (file)
@@ -123,6 +123,10 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
        pipe_config->base.adjusted_mode.flags |= flags;
 
+       if (INTEL_INFO(dev)->gen < 5)
+               pipe_config->gmch_pfit.lvds_border_bits =
+                       tmp & LVDS_BORDER_ENABLE;
+
        /* gen2/3 store dither state in pfit control, needs to match */
        if (INTEL_INFO(dev)->gen < 4) {
                tmp = I915_READ(PFIT_CONTROL);
index 8ed3cf34f82d31bbefa98847d1413f625e2b2f74..3425d8e737b344ec8d43bdeab38957e2667d00bf 100644 (file)
@@ -6646,6 +6646,12 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
        misccpctl = I915_READ(GEN7_MISCCPCTL);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
        I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
+       /*
+        * Wait at least 100 clocks before re-enabling clock gating. See
+        * the definition of L3SQCREG1 in BSpec.
+        */
+       POSTING_READ(GEN8_L3SQCREG1);
+       udelay(1);
        I915_WRITE(GEN7_MISCCPCTL, misccpctl);
 
        /*
index b80b08f71cb46e8d69d7bd94f6d951008267500e..532127c55de64197698336c92d602b0ff3043071 100644 (file)
@@ -1742,6 +1742,7 @@ static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
 static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
 {
        struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_crtc *test_crtc;
        struct radeon_crtc *test_radeon_crtc;
 
@@ -1751,6 +1752,10 @@ static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
                test_radeon_crtc = to_radeon_crtc(test_crtc);
                if (test_radeon_crtc->encoder &&
                    ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+                       /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+                       if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+                           test_radeon_crtc->pll_id == ATOM_PPLL2)
+                               continue;
                        /* for DP use the same PLL for all */
                        if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
                                return test_radeon_crtc->pll_id;
@@ -1772,6 +1777,7 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
 {
        struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
        struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct drm_crtc *test_crtc;
        struct radeon_crtc *test_radeon_crtc;
        u32 adjusted_clock, test_adjusted_clock;
@@ -1787,6 +1793,10 @@ static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
                test_radeon_crtc = to_radeon_crtc(test_crtc);
                if (test_radeon_crtc->encoder &&
                    !ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
+                       /* PPLL2 is exclusive to UNIPHYA on DCE61 */
+                       if (ASIC_IS_DCE61(rdev) && !ASIC_IS_DCE8(rdev) &&
+                           test_radeon_crtc->pll_id == ATOM_PPLL2)
+                               continue;
                        /* check if we are already driving this connector with another crtc */
                        if (test_radeon_crtc->connector == radeon_crtc->connector) {
                                /* if we are, return that pll */
index afa9db1dc0e3dfcc14f51bf3e74b2f2d3c5702c5..cead089a9e7d2ea1bc78046aeed4d6cb7e8a02d6 100644 (file)
@@ -326,8 +326,8 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
                        }
                }
        } else {
-               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-                       for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
                                max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
                                if (max_pix_clock >= pix_clock) {
                                        *dp_lanes = lane_num;
index edd05cdb0cd843dfe9ee53ecbac7f4a8908d3dd2..587cae4e73c9abeeb65d2187b9e700bb8f0fa442 100644 (file)
@@ -310,6 +310,10 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
            && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
                adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+       /* vertical FP must be at least 1 */
+       if (mode->crtc_vsync_start == mode->crtc_vdisplay)
+               adjusted_mode->crtc_vsync_start++;
+
        /* get the native mode for scaling */
        if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
                radeon_panel_mode_fixup(encoder, adjusted_mode);
index 3b0c229d7dcd23ffb7184ad79e2ebaa8f001cca9..db64e0062689b076842b9710c33e4660c96e9985 100644 (file)
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
 
        tmp &= AUX_HPD_SEL(0x7);
        tmp |= AUX_HPD_SEL(chan->rec.hpd);
-       tmp |= AUX_EN | AUX_LS_READ_EN;
+       tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
 
        WREG32(AUX_CONTROL + aux_offset[instance], tmp);
 
index e00db3f510dd425c62565d913c937c5638a072d5..abb98c77bad25c6901b910095068a3aec5059311 100644 (file)
@@ -1068,7 +1068,6 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
                        goto err_register;
                }
 
-               pdev->dev.of_node = of_node;
                pdev->dev.parent = dev;
 
                ret = platform_device_add_data(pdev, &reg->pdata,
@@ -1079,6 +1078,12 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
                        platform_device_put(pdev);
                        goto err_register;
                }
+
+               /*
+                * Set of_node only after calling platform_device_add. Otherwise
+                * the platform:imx-ipuv3-crtc modalias won't be used.
+                */
+               pdev->dev.of_node = of_node;
        }
 
        return 0;
index c6eaff5f88451f770cf036e71b9167a0202351e7..0238f0169e48f5d45b345498c5ba713c8cb9a96d 100644 (file)
 #define USB_DEVICE_ID_CORSAIR_K90      0x1b02
 
 #define USB_VENDOR_ID_CREATIVELABS     0x041e
+#define USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51     0x322c
 #define USB_DEVICE_ID_PRODIKEYS_PCMIDI 0x2801
 
 #define USB_VENDOR_ID_CVTOUCH          0x1ff7
index ed2f68edc8f1c648e30b15178fc550cb9a071f88..53fc856d6867b032e6e2100c1c87c3bf5733702c 100644 (file)
@@ -71,6 +71,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_3AXIS_5BUTTON_STICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
+       { USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
index 02c4efea241c02eb6270d19b40cc90256b04721a..cf2ba43453fd4ecd9458075a9767676534a354fe 100644 (file)
@@ -684,6 +684,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
 
                wacom->tool[idx] = wacom_intuos_get_tool_type(wacom->id[idx]);
 
+               wacom->shared->stylus_in_proximity = true;
                return 1;
        }
 
@@ -3395,6 +3396,10 @@ static const struct wacom_features wacom_features_0x33E =
        { "Wacom Intuos PT M 2", 21600, 13500, 2047, 63,
          INTUOSHT2, WACOM_INTUOS_RES, WACOM_INTUOS_RES, .touch_max = 16,
          .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x343 =
+       { "Wacom DTK1651", 34616, 19559, 1023, 0,
+         DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+         WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 
 static const struct wacom_features wacom_features_HID_ANY_ID =
        { "Wacom HID", .type = HID_GENERIC };
@@ -3560,6 +3565,7 @@ const struct hid_device_id wacom_ids[] = {
        { USB_DEVICE_WACOM(0x33C) },
        { USB_DEVICE_WACOM(0x33D) },
        { USB_DEVICE_WACOM(0x33E) },
+       { USB_DEVICE_WACOM(0x343) },
        { USB_DEVICE_WACOM(0x4001) },
        { USB_DEVICE_WACOM(0x4004) },
        { USB_DEVICE_WACOM(0x5000) },
index 5613e2b5cff7759861a1e2d7ad2748f7626d16da..a40a73a7b71da359574bf4061d80340b1caa30a4 100644 (file)
@@ -103,15 +103,29 @@ static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi)
  *    there is room for the producer to send the pending packet.
  */
 
-static bool hv_need_to_signal_on_read(u32 prev_write_sz,
-                                     struct hv_ring_buffer_info *rbi)
+static bool hv_need_to_signal_on_read(struct hv_ring_buffer_info *rbi)
 {
        u32 cur_write_sz;
        u32 r_size;
-       u32 write_loc = rbi->ring_buffer->write_index;
+       u32 write_loc;
        u32 read_loc = rbi->ring_buffer->read_index;
-       u32 pending_sz = rbi->ring_buffer->pending_send_sz;
+       u32 pending_sz;
 
+       /*
+        * Issue a full memory barrier before making the signaling decision.
+        * Here is the reason for having this barrier:
+        * If the read of pending_sz (in this function) were to be
+        * reordered before the commit of the new read index (in the
+        * calling function), we could have a problem: if the host sets
+        * pending_sz after we have sampled it and then goes to sleep
+        * before we commit the read index, we would miss sending the
+        * interrupt. Issue a full memory barrier to address this.
+        */
+       mb();
+
+       pending_sz = rbi->ring_buffer->pending_send_sz;
+       write_loc = rbi->ring_buffer->write_index;
        /* If the other end is not blocked on write don't bother. */
        if (pending_sz == 0)
                return false;
@@ -120,7 +134,7 @@ static bool hv_need_to_signal_on_read(u32 prev_write_sz,
        cur_write_sz = write_loc >= read_loc ? r_size - (write_loc - read_loc) :
                        read_loc - write_loc;
 
-       if ((prev_write_sz < pending_sz) && (cur_write_sz >= pending_sz))
+       if (cur_write_sz >= pending_sz)
                return true;
 
        return false;
@@ -455,7 +469,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info,
        /* Update the read index */
        hv_set_next_read_location(inring_info, next_read_location);
 
-       *signal = hv_need_to_signal_on_read(bytes_avail_towrite, inring_info);
+       *signal = hv_need_to_signal_on_read(inring_info);
 
        return ret;
 }
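
The ordering the new barrier enforces amounts to "publish the read index, then sample pending_send_sz". A minimal sketch of that reader-side sequence (illustrative only; this helper does not exist in the driver):

	static bool ring_commit_read_and_check(struct hv_ring_buffer_info *rbi,
					       u32 next_read_location)
	{
		/* Make the consumed space visible to the host first ... */
		rbi->ring_buffer->read_index = next_read_location;
		mb();
		/* ... and only then look at whether the host is blocked. */
		return rbi->ring_buffer->pending_send_sz != 0;
	}
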
index dbee13ad33a3c341777beb60715ec1f7007fd767..2e154cb5168567ce3f09ef095bb65a639802d7e4 100644 (file)
@@ -451,6 +451,8 @@ static int at91_adc_probe(struct platform_device *pdev)
        if (ret)
                goto vref_disable;
 
+       platform_set_drvdata(pdev, indio_dev);
+
        ret = iio_device_register(indio_dev);
        if (ret < 0)
                goto per_clk_disable_unprepare;
index f581256d9d4c2473eca8979b3c0075c308f844a8..5ee4e0dc093e687445cfb3daab47f8a7f0e182fe 100644 (file)
@@ -104,6 +104,19 @@ static int inv_mpu6050_deselect_bypass(struct i2c_adapter *adap,
        return 0;
 }
 
+static const char *inv_mpu_match_acpi_device(struct device *dev, int *chip_id)
+{
+       const struct acpi_device_id *id;
+
+       id = acpi_match_device(dev->driver->acpi_match_table, dev);
+       if (!id)
+               return NULL;
+
+       *chip_id = (int)id->driver_data;
+
+       return dev_name(dev);
+}
+
 /**
  *  inv_mpu_probe() - probe function.
  *  @client:          i2c client.
@@ -115,14 +128,25 @@ static int inv_mpu_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
 {
        struct inv_mpu6050_state *st;
-       int result;
-       const char *name = id ? id->name : NULL;
+       int result, chip_type;
        struct regmap *regmap;
+       const char *name;
 
        if (!i2c_check_functionality(client->adapter,
                                     I2C_FUNC_SMBUS_I2C_BLOCK))
                return -EOPNOTSUPP;
 
+       if (id) {
+               chip_type = (int)id->driver_data;
+               name = id->name;
+       } else if (ACPI_HANDLE(&client->dev)) {
+               name = inv_mpu_match_acpi_device(&client->dev, &chip_type);
+               if (!name)
+                       return -ENODEV;
+       } else {
+               return -ENOSYS;
+       }
+
        regmap = devm_regmap_init_i2c(client, &inv_mpu_regmap_config);
        if (IS_ERR(regmap)) {
                dev_err(&client->dev, "Failed to register i2c regmap %d\n",
@@ -131,7 +155,7 @@ static int inv_mpu_probe(struct i2c_client *client,
        }
 
        result = inv_mpu_core_probe(regmap, client->irq, name,
-                                   NULL, id->driver_data);
+                                   NULL, chip_type);
        if (result < 0)
                return result;
 
index dea6c4361de013c38441100dc26078fd13393a89..7bcb8d839f0549a1a89c6291a78796841cf1db5c 100644 (file)
@@ -46,6 +46,7 @@ static int inv_mpu_probe(struct spi_device *spi)
        struct regmap *regmap;
        const struct spi_device_id *id = spi_get_device_id(spi);
        const char *name = id ? id->name : NULL;
+       const int chip_type = id ? id->driver_data : 0;
 
        regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
        if (IS_ERR(regmap)) {
@@ -55,7 +56,7 @@ static int inv_mpu_probe(struct spi_device *spi)
        }
 
        return inv_mpu_core_probe(regmap, spi->irq, name,
-                                 inv_mpu_i2c_disable, id->driver_data);
+                                 inv_mpu_i2c_disable, chip_type);
 }
 
 static int inv_mpu_remove(struct spi_device *spi)
index 9c5c9ef3f1dad25f37e108aeb9752940f46aa393..0e931a9a1669424278a1daa935a919e32db32e35 100644 (file)
@@ -462,6 +462,8 @@ static int ak8975_setup_irq(struct ak8975_data *data)
        int rc;
        int irq;
 
+       init_waitqueue_head(&data->data_ready_queue);
+       clear_bit(0, &data->flags);
        if (client->irq)
                irq = client->irq;
        else
@@ -477,8 +479,6 @@ static int ak8975_setup_irq(struct ak8975_data *data)
                return rc;
        }
 
-       init_waitqueue_head(&data->data_ready_queue);
-       clear_bit(0, &data->flags);
        data->eoc_irq = irq;
 
        return rc;
@@ -732,7 +732,7 @@ static int ak8975_probe(struct i2c_client *client,
        int eoc_gpio;
        int err;
        const char *name = NULL;
-       enum asahi_compass_chipset chipset;
+       enum asahi_compass_chipset chipset = AK_MAX_TYPE;
 
        /* Grab and set up the supplied GPIO. */
        if (client->dev.platform_data)
index 80b6bedc172f32a1d9dfad1e36ee8aa3a4a16240..64b3d11dcf1ed588a95d149f2bbd9ff884562893 100644 (file)
@@ -612,6 +612,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
        struct Scsi_Host *shost;
        struct iser_conn *iser_conn = NULL;
        struct ib_conn *ib_conn;
+       u32 max_fr_sectors;
        u16 max_cmds;
 
        shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
@@ -632,7 +633,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                iser_conn = ep->dd_data;
                max_cmds = iser_conn->max_cmds;
                shost->sg_tablesize = iser_conn->scsi_sg_tablesize;
-               shost->max_sectors = iser_conn->scsi_max_sectors;
 
                mutex_lock(&iser_conn->state_mutex);
                if (iser_conn->state != ISER_CONN_UP) {
@@ -657,8 +657,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                 */
                shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
                        ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-               shost->max_sectors = min_t(unsigned int,
-                       1024, (shost->sg_tablesize * PAGE_SIZE) >> 9);
 
                if (iscsi_host_add(shost,
                                   ib_conn->device->ib_device->dma_device)) {
@@ -672,6 +670,15 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
                        goto free_host;
        }
 
+       /*
+        * FRs or FMRs can only map up to a (device) page per entry, but if the
+        * first entry is misaligned we'll end up using using two entries
+        * (head and tail) for a single page worth data, so we have to drop
+        * one segment from the calculation.
+        */
+       max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
+       shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+
        if (cmds_max > max_cmds) {
                iser_info("cmds_max changed from %u to %u\n",
                          cmds_max, max_cmds);
@@ -989,7 +996,6 @@ static struct scsi_host_template iscsi_iser_sht = {
        .queuecommand           = iscsi_queuecommand,
        .change_queue_depth     = scsi_change_queue_depth,
        .sg_tablesize           = ISCSI_ISER_DEF_SG_TABLESIZE,
-       .max_sectors            = ISER_DEF_MAX_SECTORS,
        .cmd_per_lun            = ISER_DEF_CMD_PER_LUN,
        .eh_abort_handler       = iscsi_eh_abort,
        .eh_device_reset_handler= iscsi_eh_device_reset,
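
The hunk above derives shost->max_sectors from the fast-registration limits instead of the fixed template value. As a rough illustration of the arithmetic in the comment (one sg entry is reserved for a possibly misaligned head/tail page, and the >> 9 converts bytes to 512-byte sectors), here is a standalone sketch; the sg_tablesize, page size and iser_max_sectors values below are illustrative assumptions, not values taken from the driver.

#include <stdio.h>

/* Standalone sketch of the max_fr_sectors cap computed above.
 * All numeric values are illustrative assumptions, not driver constants. */
int main(void)
{
    unsigned int sg_tablesize = 128;      /* assumed shost->sg_tablesize */
    unsigned long page_size = 4096;       /* assumed PAGE_SIZE */
    unsigned int iser_max_sectors = 1024; /* assumed module parameter value */

    /* Drop one entry for a misaligned first page (head + tail),
     * then convert bytes to 512-byte sectors. */
    unsigned int max_fr_sectors = ((sg_tablesize - 1) * page_size) >> 9;
    unsigned int max_sectors =
        max_fr_sectors < iser_max_sectors ? max_fr_sectors : iser_max_sectors;

    printf("max_fr_sectors = %u, shost->max_sectors would be %u\n",
           max_fr_sectors, max_sectors);
    return 0;
}
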
index a806ba3818f7267dd2036dd870c4f60ca28dc0d5..8d6326d7e7beaf1875bb95af385dff1285f10b0e 100644 (file)
@@ -255,12 +255,14 @@ static int max8997_haptic_probe(struct platform_device *pdev)
        struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
        const struct max8997_platform_data *pdata =
                                        dev_get_platdata(iodev->dev);
-       const struct max8997_haptic_platform_data *haptic_pdata =
-                                       pdata->haptic_pdata;
+       const struct max8997_haptic_platform_data *haptic_pdata = NULL;
        struct max8997_haptic *chip;
        struct input_dev *input_dev;
        int error;
 
+       if (pdata)
+               haptic_pdata = pdata->haptic_pdata;
+
        if (!haptic_pdata) {
                dev_err(&pdev->dev, "no haptic platform data\n");
                return -EINVAL;
index 53e33fab3f7afadef16e97a76271752d9d6d891c..42de34b9299633f5104f8127e3c8bb50ec83f294 100644 (file)
@@ -181,6 +181,14 @@ static void vibra_play_work(struct work_struct *work)
 {
        struct vibra_info *info = container_of(work,
                                struct vibra_info, play_work);
+       int ret;
+
+       /* Do not allow effect, while the routing is set to use audio */
+       ret = twl6040_get_vibralr_status(info->twl6040);
+       if (ret & TWL6040_VIBSEL) {
+               dev_info(info->dev, "Vibra is configured for audio\n");
+               return;
+       }
 
        mutex_lock(&info->mutex);
 
@@ -199,14 +207,6 @@ static int vibra_play(struct input_dev *input, void *data,
                      struct ff_effect *effect)
 {
        struct vibra_info *info = input_get_drvdata(input);
-       int ret;
-
-       /* Do not allow effect, while the routing is set to use audio */
-       ret = twl6040_get_vibralr_status(info->twl6040);
-       if (ret & TWL6040_VIBSEL) {
-               dev_info(&input->dev, "Vibra is configured for audio\n");
-               return -EBUSY;
-       }
 
        info->weak_speed = effect->u.rumble.weak_magnitude;
        info->strong_speed = effect->u.rumble.strong_magnitude;
@@ -257,6 +257,7 @@ static int twl6040_vibra_probe(struct platform_device *pdev)
        int vddvibr_uV = 0;
        int error;
 
+       of_node_get(twl6040_core_dev->of_node);
        twl6040_core_node = of_find_node_by_name(twl6040_core_dev->of_node,
                                                 "vibra");
        if (!twl6040_core_node) {
index fdc243ca93ed7c50c89c18de6477e6b3c6d44a6f..e583f8b504549c6415f697f5d746a38e440dc556 100644 (file)
@@ -2,6 +2,10 @@
  * BYD TouchPad PS/2 mouse driver
  *
  * Copyright (C) 2015 Chris Diamand <chris@diamand.org>
+ * Copyright (C) 2015 Richard Pospesel
+ * Copyright (C) 2015 Tai Chi Minh Ralph Eastwood
+ * Copyright (C) 2015 Martin Wimpress
+ * Copyright (C) 2015 Jay Kuri
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
index 2160512e861af57d289fb5873ef80b81c2fbf7c9..5af7907d0af4dd6ab79639ee91287cdf0bc90202 100644 (file)
@@ -1093,6 +1093,19 @@ static int mxt_t6_command(struct mxt_data *data, u16 cmd_offset,
        return 0;
 }
 
+static int mxt_acquire_irq(struct mxt_data *data)
+{
+       int error;
+
+       enable_irq(data->irq);
+
+       error = mxt_process_messages_until_invalid(data);
+       if (error)
+               return error;
+
+       return 0;
+}
+
 static int mxt_soft_reset(struct mxt_data *data)
 {
        struct device *dev = &data->client->dev;
@@ -1111,7 +1124,7 @@ static int mxt_soft_reset(struct mxt_data *data)
        /* Ignore CHG line for 100ms after reset */
        msleep(100);
 
-       enable_irq(data->irq);
+       mxt_acquire_irq(data);
 
        ret = mxt_wait_for_completion(data, &data->reset_completion,
                                      MXT_RESET_TIMEOUT);
@@ -1466,19 +1479,6 @@ release_mem:
        return ret;
 }
 
-static int mxt_acquire_irq(struct mxt_data *data)
-{
-       int error;
-
-       enable_irq(data->irq);
-
-       error = mxt_process_messages_until_invalid(data);
-       if (error)
-               return error;
-
-       return 0;
-}
-
 static int mxt_get_info(struct mxt_data *data)
 {
        struct i2c_client *client = data->client;
index 9bbadaaf6bc3723f044a9c44e8619773d242ec8f..7b3845aa5983ad57892a0022990f3353bcca82b6 100644 (file)
@@ -370,8 +370,8 @@ static int zforce_touch_event(struct zforce_ts *ts, u8 *payload)
                        point.coord_x = point.coord_y = 0;
                }
 
-               point.state = payload[9 * i + 5] & 0x03;
-               point.id = (payload[9 * i + 5] & 0xfc) >> 2;
+               point.state = payload[9 * i + 5] & 0x0f;
+               point.id = (payload[9 * i + 5] & 0xf0) >> 4;
 
                /* determine touch major, minor and orientation */
                point.area_major = max(payload[9 * i + 6],
index 194580fba7fd8b7a965ea87f9856cada61e15be6..14d3b37944df031214c2c6951ed15c46da104842 100644 (file)
@@ -284,6 +284,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
         * go away inside make_request
         */
        sectors = bio_sectors(bio);
+       /* bio could be mergeable after passing to the underlying layer */
+       bio->bi_rw &= ~REQ_NOMERGE;
        mddev->pers->make_request(mddev, bio);
 
        cpu = part_stat_lock();
index 2ea12c6bf65997895e26a89fe8977451ae438d97..34783a3c8b3c1af780a092b8d11018f702541481 100644 (file)
@@ -70,7 +70,6 @@ static void dump_zones(struct mddev *mddev)
                        (unsigned long long)zone_size>>1);
                zone_start = conf->strip_zone[j].zone_end;
        }
-       printk(KERN_INFO "\n");
 }
 
 static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
@@ -85,6 +84,7 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
        struct r0conf *conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        unsigned short blksize = 512;
 
+       *private_conf = ERR_PTR(-ENOMEM);
        if (!conf)
                return -ENOMEM;
        rdev_for_each(rdev1, mddev) {
index 8ab8b65e17413e4a015aab5e073728fd23e21b47..e48c262ce0322fe504d92d14403c70cb3ef4a608 100644 (file)
@@ -3502,8 +3502,6 @@ returnbi:
                                dev = &sh->dev[i];
                        } else if (test_bit(R5_Discard, &dev->flags))
                                discard_pending = 1;
-                       WARN_ON(test_bit(R5_SkipCopy, &dev->flags));
-                       WARN_ON(dev->page != dev->orig_page);
                }
 
        r5l_stripe_write_finished(sh);
index 6e43c95629ea8e8fc550b6fc63bf56b5c627b1ac..3cfd7af8c5cab06607a94c716951daa6166b60e2 100644 (file)
@@ -846,11 +846,11 @@ struct media_device *media_device_find_devres(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(media_device_find_devres);
 
+#if IS_ENABLED(CONFIG_PCI)
 void media_device_pci_init(struct media_device *mdev,
                           struct pci_dev *pci_dev,
                           const char *name)
 {
-#ifdef CONFIG_PCI
        mdev->dev = &pci_dev->dev;
 
        if (name)
@@ -866,16 +866,16 @@ void media_device_pci_init(struct media_device *mdev,
        mdev->driver_version = LINUX_VERSION_CODE;
 
        media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(media_device_pci_init);
+#endif
 
+#if IS_ENABLED(CONFIG_USB)
 void __media_device_usb_init(struct media_device *mdev,
                             struct usb_device *udev,
                             const char *board_name,
                             const char *driver_name)
 {
-#ifdef CONFIG_USB
        mdev->dev = &udev->dev;
 
        if (driver_name)
@@ -895,9 +895,9 @@ void __media_device_usb_init(struct media_device *mdev,
        mdev->driver_version = LINUX_VERSION_CODE;
 
        media_device_init(mdev);
-#endif
 }
 EXPORT_SYMBOL_GPL(__media_device_usb_init);
+#endif
 
 
 #endif /* CONFIG_MEDIA_CONTROLLER */
index feb521f28e14857b8344d7320769c87183cd17ac..4f494acd8150fd85f8aaedfb3572b85bb22b460d 100644 (file)
@@ -1446,22 +1446,13 @@ static int fimc_md_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, fmd);
 
-       /* Protect the media graph while we're registering entities */
-       mutex_lock(&fmd->media_dev.graph_mutex);
-
        ret = fimc_md_register_platform_entities(fmd, dev->of_node);
-       if (ret) {
-               mutex_unlock(&fmd->media_dev.graph_mutex);
+       if (ret)
                goto err_clk;
-       }
 
        ret = fimc_md_register_sensor_entities(fmd);
-       if (ret) {
-               mutex_unlock(&fmd->media_dev.graph_mutex);
+       if (ret)
                goto err_m_ent;
-       }
-
-       mutex_unlock(&fmd->media_dev.graph_mutex);
 
        ret = device_create_file(&pdev->dev, &dev_attr_subdev_conf_mode);
        if (ret)
index 0b44b9accf50794172ddb2249414883a1b125b43..af237af204e2760fc0efcaac6ac2dc31207ef662 100644 (file)
@@ -493,21 +493,17 @@ static int s3c_camif_probe(struct platform_device *pdev)
        if (ret < 0)
                goto err_sens;
 
-       mutex_lock(&camif->media_dev.graph_mutex);
-
        ret = v4l2_device_register_subdev_nodes(&camif->v4l2_dev);
        if (ret < 0)
-               goto err_unlock;
+               goto err_sens;
 
        ret = camif_register_video_nodes(camif);
        if (ret < 0)
-               goto err_unlock;
+               goto err_sens;
 
        ret = camif_create_media_links(camif);
        if (ret < 0)
-               goto err_unlock;
-
-       mutex_unlock(&camif->media_dev.graph_mutex);
+               goto err_sens;
 
        ret = media_device_register(&camif->media_dev);
        if (ret < 0)
@@ -516,8 +512,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
        pm_runtime_put(dev);
        return 0;
 
-err_unlock:
-       mutex_unlock(&camif->media_dev.graph_mutex);
 err_sens:
        v4l2_device_unregister(&camif->v4l2_dev);
        media_device_unregister(&camif->media_dev);
index 7f366f1b0377a3557201a1bf64470472a29d5658..0b1b8c7b6ce51e69cd2c7f9e8c3eec7f6b6744eb 100644 (file)
@@ -74,11 +74,6 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
        return 0;
 }
 
-static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
-{
-       return __verify_planes_array(vb, pb);
-}
-
 /**
  * __verify_length() - Verify that the bytesused value for each plane fits in
  * the plane length and that the data offset doesn't exceed the bytesused value.
@@ -442,7 +437,6 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
 }
 
 static const struct vb2_buf_ops v4l2_buf_ops = {
-       .verify_planes_array    = __verify_planes_array_core,
        .fill_user_buffer       = __fill_v4l2_buffer,
        .fill_vb2_buffer        = __fill_vb2_buffer,
        .copy_timestamp         = __copy_timestamp,
index e94c7fb6712aab35a1a3278241c3bab494f68777..88e45234d527518e0d449d598c5f04a5be5538a1 100644 (file)
@@ -945,6 +945,11 @@ static long vop_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
                        ret = -EFAULT;
                        goto free_ret;
                }
+               /* Ensure desc has not changed between the two reads */
+               if (memcmp(&dd, dd_config, sizeof(dd))) {
+                       ret = -EINVAL;
+                       goto free_ret;
+               }
                mutex_lock(&vdev->vdev_mutex);
                mutex_lock(&vi->vop_mutex);
                ret = vop_virtio_add_device(vdev, dd_config);
index a2904029cccc2949b90d66cc8fb7d3fcbc11f103..5e572b3510b9483a6cf38a055ed473ab83776ab0 100644 (file)
@@ -2181,7 +2181,7 @@ int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
                               struct net_device *bridge)
 {
        struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
-       int i, err;
+       int i, err = 0;
 
        mutex_lock(&ps->smi_mutex);
 
index b212488606da4877117e7e9f617a169f2d62f282..11be8044e0d7b9e62f0ac2494925e83d21c3aeab 100644 (file)
@@ -43,6 +43,7 @@ static void xgene_cle_idt_to_hw(u32 dstqid, u32 fpsel,
 static void xgene_cle_dbptr_to_hw(struct xgene_enet_pdata *pdata,
                                  struct xgene_cle_dbptr *dbptr, u32 *buf)
 {
+       buf[0] = SET_VAL(CLE_DROP, dbptr->drop);
        buf[4] = SET_VAL(CLE_FPSEL, dbptr->fpsel) |
                 SET_VAL(CLE_DSTQIDL, dbptr->dstqid);
 
@@ -412,7 +413,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                        .branch = {
                                {
                                        /* IPV4 */
-                                       .valid = 0,
+                                       .valid = 1,
                                        .next_packet_pointer = 22,
                                        .jump_bw = JMP_FW,
                                        .jump_rel = JMP_ABS,
@@ -420,7 +421,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                        .next_node = PKT_PROT_NODE,
                                        .next_branch = 0,
                                        .data = 0x8,
-                                       .mask = 0xffff
+                                       .mask = 0x0
                                },
                                {
                                        .valid = 0,
@@ -456,7 +457,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                        .next_node = RSS_IPV4_TCP_NODE,
                                        .next_branch = 0,
                                        .data = 0x0600,
-                                       .mask = 0xffff
+                                       .mask = 0x00ff
                                },
                                {
                                        /* UDP */
@@ -468,7 +469,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                        .next_node = RSS_IPV4_UDP_NODE,
                                        .next_branch = 0,
                                        .data = 0x1100,
-                                       .mask = 0xffff
+                                       .mask = 0x00ff
                                },
                                {
                                        .valid = 0,
@@ -642,7 +643,7 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
                                {
                                        /* TCP DST Port */
                                        .valid = 0,
-                                       .next_packet_pointer = 256,
+                                       .next_packet_pointer = 258,
                                        .jump_bw = JMP_FW,
                                        .jump_rel = JMP_ABS,
                                        .operation = EQT,
index 29a17abdd828174cb3f5e4fad10458c0df26efca..3bf90683240e1b3d759e67d13d7071aa4e08c020 100644 (file)
@@ -83,6 +83,8 @@
 #define CLE_TYPE_POS           0
 #define CLE_TYPE_LEN           2
 
+#define CLE_DROP_POS           28
+#define CLE_DROP_LEN           1
 #define CLE_DSTQIDL_POS                25
 #define CLE_DSTQIDL_LEN                7
 #define CLE_DSTQIDH_POS                0
index 39e081a70f5b4f5bb9b674d7fe9c470f6d807b80..513d2a62ee6dae1da339c030f4e59453334d39d1 100644 (file)
@@ -219,27 +219,30 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
                            struct xgene_enet_pdata *pdata,
                            enum xgene_enet_err_code status)
 {
-       struct rtnl_link_stats64 *stats = &pdata->stats;
-
        switch (status) {
        case INGRESS_CRC:
-               stats->rx_crc_errors++;
+               ring->rx_crc_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_CHECKSUM:
        case INGRESS_CHECKSUM_COMPUTE:
-               stats->rx_errors++;
+               ring->rx_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_TRUNC_FRAME:
-               stats->rx_frame_errors++;
+               ring->rx_frame_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_PKT_LEN:
-               stats->rx_length_errors++;
+               ring->rx_length_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_PKT_UNDER:
-               stats->rx_frame_errors++;
+               ring->rx_frame_errors++;
+               ring->rx_dropped++;
                break;
        case INGRESS_FIFO_OVERRUN:
-               stats->rx_fifo_errors++;
+               ring->rx_fifo_errors++;
                break;
        default:
                break;
index ba7da98af2efb48c2a5811244ea4776185ce2534..45220be3122f99ecf50c96e5f83fb6c8399108ac 100644 (file)
@@ -86,7 +86,7 @@ enum xgene_enet_rm {
 #define RINGADDRL_POS          5
 #define RINGADDRL_LEN          27
 #define RINGADDRH_POS          0
-#define RINGADDRH_LEN          6
+#define RINGADDRH_LEN          7
 #define RINGSIZE_POS           23
 #define RINGSIZE_LEN           3
 #define RINGTYPE_POS           19
@@ -94,9 +94,9 @@ enum xgene_enet_rm {
 #define RINGMODE_POS           20
 #define RINGMODE_LEN           3
 #define RECOMTIMEOUTL_POS      28
-#define RECOMTIMEOUTL_LEN      3
+#define RECOMTIMEOUTL_LEN      4
 #define RECOMTIMEOUTH_POS      0
-#define RECOMTIMEOUTH_LEN      2
+#define RECOMTIMEOUTH_LEN      3
 #define NUMMSGSINQ_POS         1
 #define NUMMSGSINQ_LEN         16
 #define ACCEPTLERR             BIT(19)
@@ -201,6 +201,8 @@ enum xgene_enet_rm {
 #define USERINFO_LEN                   32
 #define FPQNUM_POS                     32
 #define FPQNUM_LEN                     12
+#define ELERR_POS                       46
+#define ELERR_LEN                       2
 #define NV_POS                         50
 #define NV_LEN                         1
 #define LL_POS                         51
index 8d4c1ad2fc6051f66ffc059456927511e9dd2fe3..fd200883d228eebb5063dc085858906ee23a372a 100644 (file)
@@ -443,8 +443,8 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 
        skb_tx_timestamp(skb);
 
-       pdata->stats.tx_packets++;
-       pdata->stats.tx_bytes += skb->len;
+       tx_ring->tx_packets++;
+       tx_ring->tx_bytes += skb->len;
 
        pdata->ring_ops->wr_cmd(tx_ring, count);
        return NETDEV_TX_OK;
@@ -483,12 +483,12 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
        skb = buf_pool->rx_skb[skb_index];
 
        /* checking for error */
-       status = GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
+       status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
+                 GET_VAL(LERR, le64_to_cpu(raw_desc->m0));
        if (unlikely(status > 2)) {
                dev_kfree_skb_any(skb);
                xgene_enet_parse_error(rx_ring, netdev_priv(rx_ring->ndev),
                                       status);
-               pdata->stats.rx_dropped++;
                ret = -EIO;
                goto out;
        }
@@ -506,8 +506,8 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
                xgene_enet_skip_csum(skb);
        }
 
-       pdata->stats.rx_packets++;
-       pdata->stats.rx_bytes += datalen;
+       rx_ring->rx_packets++;
+       rx_ring->rx_bytes += datalen;
        napi_gro_receive(&rx_ring->napi, skb);
 out:
        if (--rx_ring->nbufpool == 0) {
@@ -630,7 +630,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
                ring = pdata->rx_ring[i];
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-                                      IRQF_SHARED, ring->irq_name, ring);
+                                      0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
@@ -641,7 +641,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
                ring = pdata->tx_ring[i]->cp_ring;
                irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
                ret = devm_request_irq(dev, ring->irq, xgene_enet_rx_irq,
-                                      IRQF_SHARED, ring->irq_name, ring);
+                                      0, ring->irq_name, ring);
                if (ret) {
                        netdev_err(ndev, "Failed to request irq %s\n",
                                   ring->irq_name);
@@ -1114,12 +1114,31 @@ static struct rtnl_link_stats64 *xgene_enet_get_stats64(
 {
        struct xgene_enet_pdata *pdata = netdev_priv(ndev);
        struct rtnl_link_stats64 *stats = &pdata->stats;
+       struct xgene_enet_desc_ring *ring;
+       int i;
 
-       stats->rx_errors += stats->rx_length_errors +
-                           stats->rx_crc_errors +
-                           stats->rx_frame_errors +
-                           stats->rx_fifo_errors;
-       memcpy(storage, &pdata->stats, sizeof(struct rtnl_link_stats64));
+       memset(stats, 0, sizeof(struct rtnl_link_stats64));
+       for (i = 0; i < pdata->txq_cnt; i++) {
+               ring = pdata->tx_ring[i];
+               if (ring) {
+                       stats->tx_packets += ring->tx_packets;
+                       stats->tx_bytes += ring->tx_bytes;
+               }
+       }
+
+       for (i = 0; i < pdata->rxq_cnt; i++) {
+               ring = pdata->rx_ring[i];
+               if (ring) {
+                       stats->rx_packets += ring->rx_packets;
+                       stats->rx_bytes += ring->rx_bytes;
+                       stats->rx_errors += ring->rx_length_errors +
+                               ring->rx_crc_errors +
+                               ring->rx_frame_errors +
+                               ring->rx_fifo_errors;
+                       stats->rx_dropped += ring->rx_dropped;
+               }
+       }
+       memcpy(storage, stats, sizeof(struct rtnl_link_stats64));
 
        return storage;
 }
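
The get_stats64 rework above stops relying on a single pdata->stats copy and instead sums the new per-ring counters on demand. A minimal userspace sketch of that aggregation pattern follows; struct ring_stats is a simplified stand-in for the fields added to struct xgene_enet_desc_ring later in this diff, and the sample numbers are made up.

#include <stdio.h>
#include <stdint.h>

/* Simplified per-ring counters, modelled on the fields this series adds
 * to struct xgene_enet_desc_ring (illustrative only). */
struct ring_stats {
    uint64_t rx_packets, rx_bytes, rx_dropped;
    uint64_t rx_length_errors, rx_crc_errors, rx_frame_errors, rx_fifo_errors;
};

int main(void)
{
    struct ring_stats rings[2] = {
        { .rx_packets = 100, .rx_bytes = 150000, .rx_crc_errors = 1 },
        { .rx_packets = 250, .rx_bytes = 380000, .rx_fifo_errors = 2 },
    };
    uint64_t rx_packets = 0, rx_bytes = 0, rx_errors = 0, rx_dropped = 0;

    /* Aggregate on demand, as xgene_enet_get_stats64() now does. */
    for (int i = 0; i < 2; i++) {
        rx_packets += rings[i].rx_packets;
        rx_bytes   += rings[i].rx_bytes;
        rx_errors  += rings[i].rx_length_errors + rings[i].rx_crc_errors +
                      rings[i].rx_frame_errors + rings[i].rx_fifo_errors;
        rx_dropped += rings[i].rx_dropped;
    }
    printf("rx: %llu pkts, %llu bytes, %llu errors, %llu dropped\n",
           (unsigned long long)rx_packets, (unsigned long long)rx_bytes,
           (unsigned long long)rx_errors, (unsigned long long)rx_dropped);
    return 0;
}
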
@@ -1234,6 +1253,13 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
        for (i = 0; i < max_irqs; i++) {
                ret = platform_get_irq(pdev, i);
                if (ret <= 0) {
+                       if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+                               max_irqs = i;
+                               pdata->rxq_cnt = max_irqs / 2;
+                               pdata->txq_cnt = max_irqs / 2;
+                               pdata->cq_cnt = max_irqs / 2;
+                               break;
+                       }
                        dev_err(dev, "Unable to get ENET IRQ\n");
                        ret = ret ? : -ENXIO;
                        return ret;
@@ -1437,19 +1463,28 @@ static void xgene_enet_setup_ops(struct xgene_enet_pdata *pdata)
                pdata->port_ops = &xgene_xgport_ops;
                pdata->cle_ops = &xgene_cle3in_ops;
                pdata->rm = RM0;
-               pdata->rxq_cnt = XGENE_NUM_RX_RING;
-               pdata->txq_cnt = XGENE_NUM_TX_RING;
-               pdata->cq_cnt = XGENE_NUM_TXC_RING;
+               if (!pdata->rxq_cnt) {
+                       pdata->rxq_cnt = XGENE_NUM_RX_RING;
+                       pdata->txq_cnt = XGENE_NUM_TX_RING;
+                       pdata->cq_cnt = XGENE_NUM_TXC_RING;
+               }
                break;
        }
 
        if (pdata->enet_id == XGENE_ENET1) {
                switch (pdata->port_id) {
                case 0:
-                       pdata->cpu_bufnum = START_CPU_BUFNUM_0;
-                       pdata->eth_bufnum = START_ETH_BUFNUM_0;
-                       pdata->bp_bufnum = START_BP_BUFNUM_0;
-                       pdata->ring_num = START_RING_NUM_0;
+                       if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+                               pdata->cpu_bufnum = X2_START_CPU_BUFNUM_0;
+                               pdata->eth_bufnum = X2_START_ETH_BUFNUM_0;
+                               pdata->bp_bufnum = X2_START_BP_BUFNUM_0;
+                               pdata->ring_num = START_RING_NUM_0;
+                       } else {
+                               pdata->cpu_bufnum = START_CPU_BUFNUM_0;
+                               pdata->eth_bufnum = START_ETH_BUFNUM_0;
+                               pdata->bp_bufnum = START_BP_BUFNUM_0;
+                               pdata->ring_num = START_RING_NUM_0;
+                       }
                        break;
                case 1:
                        if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
@@ -1595,21 +1630,22 @@ static int xgene_enet_probe(struct platform_device *pdev)
 
        ret = xgene_enet_init_hw(pdata);
        if (ret)
-               goto err;
+               goto err_netdev;
 
        mac_ops = pdata->mac_ops;
        if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
                ret = xgene_enet_mdio_config(pdata);
                if (ret)
-                       goto err;
+                       goto err_netdev;
        } else {
                INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
        }
 
        xgene_enet_napi_add(pdata);
        return 0;
-err:
+err_netdev:
        unregister_netdev(ndev);
+err:
        free_netdev(ndev);
        return ret;
 }
index 175d18890c7a7da01895d7970ef653ce0170c01a..9d9cf445148c165d22376958d3552881d474bc24 100644 (file)
 #define XGENE_ENET_MSS 1448
 #define XGENE_MIN_ENET_FRAME_SIZE      60
 
-#define XGENE_MAX_ENET_IRQ     8
-#define XGENE_NUM_RX_RING      4
-#define XGENE_NUM_TX_RING      4
-#define XGENE_NUM_TXC_RING     4
+#define XGENE_MAX_ENET_IRQ     16
+#define XGENE_NUM_RX_RING      8
+#define XGENE_NUM_TX_RING      8
+#define XGENE_NUM_TXC_RING     8
 
 #define START_CPU_BUFNUM_0     0
 #define START_ETH_BUFNUM_0     2
@@ -121,6 +121,16 @@ struct xgene_enet_desc_ring {
                struct xgene_enet_raw_desc16 *raw_desc16;
        };
        __le64 *exp_bufs;
+       u64 tx_packets;
+       u64 tx_bytes;
+       u64 rx_packets;
+       u64 rx_bytes;
+       u64 rx_dropped;
+       u64 rx_errors;
+       u64 rx_length_errors;
+       u64 rx_crc_errors;
+       u64 rx_frame_errors;
+       u64 rx_fifo_errors;
 };
 
 struct xgene_mac_ops {
index 29a71b4dcc44f361ef61c90bee1575984a5f47ea..002df5a6756e06e2e9f6100dba0a13d9584227d6 100644 (file)
@@ -33,7 +33,7 @@
 #define LINK_STATUS                    BIT(2)
 #define LINK_UP                                BIT(15)
 #define MPA_IDLE_WITH_QMI_EMPTY                BIT(12)
-#define SG_RX_DV_GATE_REG_0_ADDR       0x0dfc
+#define SG_RX_DV_GATE_REG_0_ADDR       0x05fc
 
 extern const struct xgene_mac_ops xgene_sgmac_ops;
 extern const struct xgene_port_ops xgene_sgport_ops;
index 12a009d720cde6d85fd660c798616aee82be0ba9..c39a7f5c6a0170abbddb0f962a5d7b3cb4cf4fb1 100644 (file)
@@ -581,12 +581,30 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        struct page *page;
        dma_addr_t mapping;
        u16 sw_prod = rxr->rx_sw_agg_prod;
+       unsigned int offset = 0;
 
-       page = alloc_page(gfp);
-       if (!page)
-               return -ENOMEM;
+       if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
+               page = rxr->rx_page;
+               if (!page) {
+                       page = alloc_page(gfp);
+                       if (!page)
+                               return -ENOMEM;
+                       rxr->rx_page = page;
+                       rxr->rx_page_offset = 0;
+               }
+               offset = rxr->rx_page_offset;
+               rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
+               if (rxr->rx_page_offset == PAGE_SIZE)
+                       rxr->rx_page = NULL;
+               else
+                       get_page(page);
+       } else {
+               page = alloc_page(gfp);
+               if (!page)
+                       return -ENOMEM;
+       }
 
-       mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
+       mapping = dma_map_page(&pdev->dev, page, offset, BNXT_RX_PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&pdev->dev, mapping)) {
                __free_page(page);
@@ -601,6 +619,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp,
        rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
 
        rx_agg_buf->page = page;
+       rx_agg_buf->offset = offset;
        rx_agg_buf->mapping = mapping;
        rxbd->rx_bd_haddr = cpu_to_le64(mapping);
        rxbd->rx_bd_opaque = sw_prod;
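
With BNXT_RX_PAGE_SIZE capped below PAGE_SIZE (see the bnxt.h hunk later in this diff), bnxt_alloc_rx_page() now carves one system page into several aggregation buffers, remembering rxr->rx_page and rxr->rx_page_offset between calls and taking an extra page reference for every chunk except the last one. The following userspace sketch mirrors only that bookkeeping; alloc_fake_page(), the fake refcount and the 64K/32K sizes are assumptions for illustration, not driver code.

#include <stdio.h>
#include <stdlib.h>

#define FAKE_PAGE_SIZE     65536   /* assumed 64K system page */
#define FAKE_RX_PAGE_SIZE  32768   /* assumed BNXT_RX_PAGE_SIZE cap */

struct fake_page { int refcount; };

static struct fake_page *alloc_fake_page(void)
{
    struct fake_page *p = malloc(sizeof(*p));
    if (p)
        p->refcount = 1;
    return p;
}

/* State kept across calls, like rxr->rx_page / rxr->rx_page_offset. */
static struct fake_page *rx_page;
static unsigned int rx_page_offset;

/* Return the page and offset one aggregation buffer would use. */
static struct fake_page *alloc_rx_chunk(unsigned int *offset)
{
    struct fake_page *page = rx_page;

    if (!page) {
        page = alloc_fake_page();
        if (!page)
            return NULL;
        rx_page = page;
        rx_page_offset = 0;
    }
    *offset = rx_page_offset;
    rx_page_offset += FAKE_RX_PAGE_SIZE;
    if (rx_page_offset == FAKE_PAGE_SIZE)
        rx_page = NULL;            /* page fully handed out */
    else
        page->refcount++;          /* like get_page() for the extra user */
    return page;
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        unsigned int off;
        struct fake_page *p = alloc_rx_chunk(&off);
        printf("chunk %d: page %p offset %u refcount %d\n",
               i, (void *)p, off, p->refcount);
    }
    return 0;
}
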
@@ -642,6 +661,7 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
                page = cons_rx_buf->page;
                cons_rx_buf->page = NULL;
                prod_rx_buf->page = page;
+               prod_rx_buf->offset = cons_rx_buf->offset;
 
                prod_rx_buf->mapping = cons_rx_buf->mapping;
 
@@ -709,7 +729,8 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                            RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
 
                cons_rx_buf = &rxr->rx_agg_ring[cons];
-               skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
+               skb_fill_page_desc(skb, i, cons_rx_buf->page,
+                                  cons_rx_buf->offset, frag_len);
                __clear_bit(cons, rxr->rx_agg_bmap);
 
                /* It is possible for bnxt_alloc_rx_page() to allocate
@@ -740,7 +761,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
                        return NULL;
                }
 
-               dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
+               dma_unmap_page(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
 
                skb->data_len += frag_len;
@@ -792,6 +813,46 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
        return skb;
 }
 
+static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
+                          u32 *raw_cons, void *cmp)
+{
+       struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+       struct rx_cmp *rxcmp = cmp;
+       u32 tmp_raw_cons = *raw_cons;
+       u8 cmp_type, agg_bufs = 0;
+
+       cmp_type = RX_CMP_TYPE(rxcmp);
+
+       if (cmp_type == CMP_TYPE_RX_L2_CMP) {
+               agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
+                           RX_CMP_AGG_BUFS) >>
+                          RX_CMP_AGG_BUFS_SHIFT;
+       } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
+               struct rx_tpa_end_cmp *tpa_end = cmp;
+
+               agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+                           RX_TPA_END_CMP_AGG_BUFS) >>
+                          RX_TPA_END_CMP_AGG_BUFS_SHIFT;
+       }
+
+       if (agg_bufs) {
+               if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
+                       return -EBUSY;
+       }
+       *raw_cons = tmp_raw_cons;
+       return 0;
+}
+
+static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+       if (!rxr->bnapi->in_reset) {
+               rxr->bnapi->in_reset = true;
+               set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
+               schedule_work(&bp->sp_task);
+       }
+       rxr->rx_next_cons = 0xffff;
+}
+
 static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
                           struct rx_tpa_start_cmp *tpa_start,
                           struct rx_tpa_start_cmp_ext *tpa_start1)
@@ -809,6 +870,11 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
        prod_rx_buf = &rxr->rx_buf_ring[prod];
        tpa_info = &rxr->rx_tpa[agg_id];
 
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               bnxt_sched_reset(bp, rxr);
+               return;
+       }
+
        prod_rx_buf->data = tpa_info->data;
 
        mapping = tpa_info->mapping;
@@ -846,6 +912,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
 
        rxr->rx_prod = NEXT_RX(prod);
        cons = NEXT_RX(cons);
+       rxr->rx_next_cons = NEXT_RX(cons);
        cons_rx_buf = &rxr->rx_buf_ring[cons];
 
        bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
@@ -959,6 +1026,14 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
        dma_addr_t mapping;
        struct sk_buff *skb;
 
+       if (unlikely(bnapi->in_reset)) {
+               int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
+
+               if (rc < 0)
+                       return ERR_PTR(-EBUSY);
+               return NULL;
+       }
+
        tpa_info = &rxr->rx_tpa[agg_id];
        data = tpa_info->data;
        prefetch(data);
@@ -1125,6 +1200,12 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
        cons = rxcmp->rx_cmp_opaque;
        rx_buf = &rxr->rx_buf_ring[cons];
        data = rx_buf->data;
+       if (unlikely(cons != rxr->rx_next_cons)) {
+               int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
+
+               bnxt_sched_reset(bp, rxr);
+               return rc1;
+       }
        prefetch(data);
 
        agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
@@ -1224,6 +1305,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
 
 next_rx:
        rxr->rx_prod = NEXT_RX(prod);
+       rxr->rx_next_cons = NEXT_RX(cons);
 
 next_rx_no_prod:
        *raw_cons = tmp_raw_cons;
@@ -1367,6 +1449,10 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
                if (!TX_CMP_VALID(txcmp, raw_cons))
                        break;
 
+               /* The valid test of the entry must be done before
+                * reading any further.
+                */
+               rmb();
                if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
                        tx_pkts++;
                        /* return full budget so NAPI will complete. */
@@ -1584,13 +1670,17 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
 
                        dma_unmap_page(&pdev->dev,
                                       dma_unmap_addr(rx_agg_buf, mapping),
-                                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
+                                      BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
                        rx_agg_buf->page = NULL;
                        __clear_bit(j, rxr->rx_agg_bmap);
 
                        __free_page(page);
                }
+               if (rxr->rx_page) {
+                       __free_page(rxr->rx_page);
+                       rxr->rx_page = NULL;
+               }
        }
 }
 
@@ -1973,7 +2063,7 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
        if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
                return 0;
 
-       type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
+       type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
                RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
 
        bnxt_init_rxbd_pages(ring, type);
@@ -2164,7 +2254,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
        bp->rx_agg_nr_pages = 0;
 
        if (bp->flags & BNXT_FLAG_TPA)
-               agg_factor = 4;
+               agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
 
        bp->flags &= ~BNXT_FLAG_JUMBO;
        if (rx_space > PAGE_SIZE) {
@@ -2457,6 +2547,7 @@ static void bnxt_clear_ring_indices(struct bnxt *bp)
                        rxr->rx_prod = 0;
                        rxr->rx_agg_prod = 0;
                        rxr->rx_sw_agg_prod = 0;
+                       rxr->rx_next_cons = 0;
                }
        }
 }
@@ -3020,12 +3111,12 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
                /* Number of segs are log2 units, and first packet is not
                 * included as part of this units.
                 */
-               if (mss <= PAGE_SIZE) {
-                       n = PAGE_SIZE / mss;
+               if (mss <= BNXT_RX_PAGE_SIZE) {
+                       n = BNXT_RX_PAGE_SIZE / mss;
                        nsegs = (MAX_SKB_FRAGS - 1) * n;
                } else {
-                       n = mss / PAGE_SIZE;
-                       if (mss & (PAGE_SIZE - 1))
+                       n = mss / BNXT_RX_PAGE_SIZE;
+                       if (mss & (BNXT_RX_PAGE_SIZE - 1))
                                n++;
                        nsegs = (MAX_SKB_FRAGS - n) / n;
                }
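
The hunk above switches the TPA segment estimate from PAGE_SIZE to BNXT_RX_PAGE_SIZE; the existing comment notes that the firmware field is in log2 units, but the interesting part here is how nsegs is derived from the MSS and the aggregation buffer size. A quick standalone check of both branches, where ASSUMED_MAX_SKB_FRAGS and ASSUMED_RX_PAGE_SIZE are illustrative stand-ins rather than the kernel's actual values:

#include <stdio.h>

#define ASSUMED_MAX_SKB_FRAGS 17     /* illustrative stand-in for MAX_SKB_FRAGS */
#define ASSUMED_RX_PAGE_SIZE  32768  /* illustrative stand-in for BNXT_RX_PAGE_SIZE */

static unsigned int tpa_nsegs(unsigned int mss)
{
    unsigned int n, nsegs;

    if (mss <= ASSUMED_RX_PAGE_SIZE) {
        /* several MSS-sized segments fit in one aggregation buffer */
        n = ASSUMED_RX_PAGE_SIZE / mss;
        nsegs = (ASSUMED_MAX_SKB_FRAGS - 1) * n;
    } else {
        /* one segment spans several aggregation buffers */
        n = mss / ASSUMED_RX_PAGE_SIZE;
        if (mss & (ASSUMED_RX_PAGE_SIZE - 1))
            n++;
        nsegs = (ASSUMED_MAX_SKB_FRAGS - n) / n;
    }
    return nsegs;
}

int main(void)
{
    printf("mss=1460  -> nsegs=%u\n", tpa_nsegs(1460));
    printf("mss=9000  -> nsegs=%u\n", tpa_nsegs(9000));
    printf("mss=65000 -> nsegs=%u\n", tpa_nsegs(65000));
    return 0;
}
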
@@ -4013,9 +4104,11 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
 }
 
 static int bnxt_cfg_rx_mode(struct bnxt *);
+static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
 
 static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
 {
+       struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
        int rc = 0;
 
        if (irq_re_init) {
@@ -4071,13 +4164,22 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
                netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
                goto err_out;
        }
-       bp->vnic_info[0].uc_filter_count = 1;
+       vnic->uc_filter_count = 1;
 
-       bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+       vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
 
        if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
-               bp->vnic_info[0].rx_mask |=
-                               CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
+
+       if (bp->dev->flags & IFF_ALLMULTI) {
+               vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
+               vnic->mc_list_count = 0;
+       } else {
+               u32 mask = 0;
+
+               bnxt_mc_list_updated(bp, &mask);
+               vnic->rx_mask |= mask;
+       }
 
        rc = bnxt_cfg_rx_mode(bp);
        if (rc)
@@ -4309,7 +4411,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
        if (bp->flags & BNXT_FLAG_MSIX_CAP)
                rc = bnxt_setup_msix(bp);
 
-       if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
+       if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
                /* fallback to INTA */
                rc = bnxt_setup_inta(bp);
        }
@@ -4422,6 +4524,7 @@ static void bnxt_enable_napi(struct bnxt *bp)
        int i;
 
        for (i = 0; i < bp->cp_nr_rings; i++) {
+               bp->bnapi[i]->in_reset = false;
                bnxt_enable_poll(bp->bnapi[i]);
                napi_enable(&bp->bnapi[i]->napi);
        }
index 709b95b8fcbad5742399607c9ec98a504ac05754..de9d53eee3ddad5742898383c34fafab89348fb7 100644 (file)
@@ -407,6 +407,15 @@ struct rx_tpa_end_cmp_ext {
 
 #define BNXT_PAGE_SIZE (1 << BNXT_PAGE_SHIFT)
 
+/* The RXBD length is 16-bit so we can only support page sizes < 64K */
+#if (PAGE_SHIFT > 15)
+#define BNXT_RX_PAGE_SHIFT 15
+#else
+#define BNXT_RX_PAGE_SHIFT PAGE_SHIFT
+#endif
+
+#define BNXT_RX_PAGE_SIZE (1 << BNXT_RX_PAGE_SHIFT)
+
 #define BNXT_MIN_PKT_SIZE      45
 
 #define BNXT_NUM_TESTS(bp)     0
@@ -506,6 +515,7 @@ struct bnxt_sw_rx_bd {
 
 struct bnxt_sw_rx_agg_bd {
        struct page             *page;
+       unsigned int            offset;
        dma_addr_t              mapping;
 };
 
@@ -574,6 +584,7 @@ struct bnxt_rx_ring_info {
        u16                     rx_prod;
        u16                     rx_agg_prod;
        u16                     rx_sw_agg_prod;
+       u16                     rx_next_cons;
        void __iomem            *rx_doorbell;
        void __iomem            *rx_agg_doorbell;
 
@@ -586,6 +597,9 @@ struct bnxt_rx_ring_info {
        unsigned long           *rx_agg_bmap;
        u16                     rx_agg_bmap_size;
 
+       struct page             *rx_page;
+       unsigned int            rx_page_offset;
+
        dma_addr_t              rx_desc_mapping[MAX_RX_PAGES];
        dma_addr_t              rx_agg_desc_mapping[MAX_RX_AGG_PAGES];
 
@@ -623,6 +637,7 @@ struct bnxt_napi {
 #ifdef CONFIG_NET_RX_BUSY_POLL
        atomic_t                poll_state;
 #endif
+       bool                    in_reset;
 };
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
index 48a7d7dee8461117b3f47bcbdebe4c0745385bf7..a63551d0a18a4b63a5ea385a46fabef39b0d36a8 100644 (file)
@@ -441,7 +441,7 @@ static int macb_mii_init(struct macb *bp)
        snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
                bp->pdev->name, bp->pdev->id);
        bp->mii_bus->priv = bp;
-       bp->mii_bus->parent = &bp->dev->dev;
+       bp->mii_bus->parent = &bp->pdev->dev;
        pdata = dev_get_platdata(&bp->pdev->dev);
 
        dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
@@ -458,7 +458,8 @@ static int macb_mii_init(struct macb *bp)
                                struct phy_device *phydev;
 
                                phydev = mdiobus_scan(bp->mii_bus, i);
-                               if (IS_ERR(phydev)) {
+                               if (IS_ERR(phydev) &&
+                                   PTR_ERR(phydev) != -ENODEV) {
                                        err = PTR_ERR(phydev);
                                        break;
                                }
@@ -3019,29 +3020,36 @@ static int macb_probe(struct platform_device *pdev)
        if (err)
                goto err_out_free_netdev;
 
+       err = macb_mii_init(bp);
+       if (err)
+               goto err_out_free_netdev;
+
+       phydev = bp->phy_dev;
+
+       netif_carrier_off(dev);
+
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
-               goto err_out_unregister_netdev;
+               goto err_out_unregister_mdio;
        }
 
-       err = macb_mii_init(bp);
-       if (err)
-               goto err_out_unregister_netdev;
-
-       netif_carrier_off(dev);
+       phy_attached_info(phydev);
 
        netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
                    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
                    dev->base_addr, dev->irq, dev->dev_addr);
 
-       phydev = bp->phy_dev;
-       phy_attached_info(phydev);
-
        return 0;
 
-err_out_unregister_netdev:
-       unregister_netdev(dev);
+err_out_unregister_mdio:
+       phy_disconnect(bp->phy_dev);
+       mdiobus_unregister(bp->mii_bus);
+       mdiobus_free(bp->mii_bus);
+
+       /* Shutdown the PHY if there is a GPIO reset */
+       if (bp->reset_gpio)
+               gpiod_set_value(bp->reset_gpio, 0);
 
 err_out_free_netdev:
        free_netdev(dev);
index fa05e347262ff76bde5c64f32146ea0daea7beb2..06b819db51b18d9e3aaa8e1ada8ed8e929597143 100644 (file)
@@ -533,6 +533,7 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
                nicvf_config_vlan_stripping(nic, nic->netdev->features);
 
        /* Enable Receive queue */
+       memset(&rq_cfg, 0, sizeof(struct rq_cfg));
        rq_cfg.ena = 1;
        rq_cfg.tcp_ena = 0;
        nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
@@ -565,6 +566,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
                              qidx, (u64)(cq->dmem.phys_base));
 
        /* Enable Completion queue */
+       memset(&cq_cfg, 0, sizeof(struct cq_cfg));
        cq_cfg.ena = 1;
        cq_cfg.reset = 0;
        cq_cfg.caching = 0;
@@ -613,6 +615,7 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
                              qidx, (u64)(sq->dmem.phys_base));
 
        /* Enable send queue  & set queue size */
+       memset(&sq_cfg, 0, sizeof(struct sq_cfg));
        sq_cfg.ena = 1;
        sq_cfg.reset = 0;
        sq_cfg.ldwb = 0;
@@ -649,6 +652,7 @@ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
 
        /* Enable RBDR  & set queue size */
        /* Buffer size should be in multiples of 128 bytes */
+       memset(&rbdr_cfg, 0, sizeof(struct rbdr_cfg));
        rbdr_cfg.ena = 1;
        rbdr_cfg.reset = 0;
        rbdr_cfg.ldwb = 0;
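
Each hunk above zeroes the on-stack config struct before setting individual fields, so that reserved bits do not carry stack garbage into the *(u64 *)&cfg register write. A minimal sketch of that pattern; struct fake_rq_cfg below is a made-up bit-field layout standing in for the thunderx queue config structs.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Made-up stand-in for the queue config structs; the real layouts
 * live in the thunderx nicvf headers. */
struct fake_rq_cfg {
    uint64_t ena:1;
    uint64_t tcp_ena:1;
    uint64_t reserved:62;
};

int main(void)
{
    struct fake_rq_cfg rq_cfg;

    /* Zero everything first so the unset/reserved fields are defined. */
    memset(&rq_cfg, 0, sizeof(rq_cfg));
    rq_cfg.ena = 1;
    rq_cfg.tcp_ena = 0;

    /* memcpy used here instead of the driver's *(u64 *) cast, to keep the
     * sketch free of aliasing concerns. */
    uint64_t regval;
    memcpy(&regval, &rq_cfg, sizeof(regval));
    printf("register word: 0x%016llx\n", (unsigned long long)regval);
    return 0;
}
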
index 60908eab3b3adf805d1b6cef462bd24ec5f64fe6..43da891fab97e7f16b572dd67f3fc6aa9020a1fb 100644 (file)
@@ -576,7 +576,7 @@ static void setup_rss(struct adapter *adap)
        unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
        unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
        u8 cpus[SGE_QSETS + 1];
-       u16 rspq_map[RSS_TABLE_SIZE];
+       u16 rspq_map[RSS_TABLE_SIZE + 1];
 
        for (i = 0; i < SGE_QSETS; ++i)
                cpus[i] = i;
@@ -586,6 +586,7 @@ static void setup_rss(struct adapter *adap)
                rspq_map[i] = i % nq0;
                rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
        }
+       rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
 
        t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
                      F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
index 1f23845a0694b5e64eaaa0f71ffca230d54feaf0..085f9125cf42a6c1aa76bbb6744ba5b9ba919d31 100644 (file)
@@ -145,7 +145,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
        u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
 
        /* Check if we got TX */
-       if (!priv->tx_packet_sent || tx_ctrl_ct)
+       if (!priv->tx_skb || tx_ctrl_ct)
                return;
 
        /* Ack Tx ctrl register */
@@ -160,7 +160,7 @@ static void nps_enet_tx_handler(struct net_device *ndev)
        }
 
        dev_kfree_skb(priv->tx_skb);
-       priv->tx_packet_sent = false;
+       priv->tx_skb = NULL;
 
        if (netif_queue_stopped(ndev))
                netif_wake_queue(ndev);
@@ -183,6 +183,9 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
        work_done = nps_enet_rx_handler(ndev);
        if (work_done < budget) {
                u32 buf_int_enable_value = 0;
+               u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+               u32 tx_ctrl_ct =
+                       (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
 
                napi_complete(napi);
 
@@ -192,6 +195,18 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
 
                nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE,
                                 buf_int_enable_value);
+
+               /* In case we get a tx interrupt while interrupts are
+                * masked, we will lose it, since tx is an edge interrupt.
+                * Specifically, while executing the code section above,
+                * between nps_enet_tx_handler() and re-enabling the
+                * interrupts, all tx requests will be stuck until we get
+                * an rx interrupt. The two code lines below solve this
+                * situation by re-adding ourselves to the poll list.
+                */
+
+               if (priv->tx_skb && !tx_ctrl_ct)
+                       napi_reschedule(napi);
        }
 
        return work_done;
@@ -217,7 +232,7 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
        u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
        u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
 
-       if ((!tx_ctrl_ct && priv->tx_packet_sent) || rx_ctrl_cr)
+       if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
                if (likely(napi_schedule_prep(&priv->napi))) {
                        nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
                        __napi_schedule(&priv->napi);
@@ -387,8 +402,6 @@ static void nps_enet_send_frame(struct net_device *ndev,
        /* Write the length of the Frame */
        tx_ctrl_value |= length << TX_CTL_NT_SHIFT;
 
-       /* Indicate SW is done */
-       priv->tx_packet_sent = true;
        tx_ctrl_value |= NPS_ENET_ENABLE << TX_CTL_CT_SHIFT;
        /* Send Frame */
        nps_enet_reg_set(priv, NPS_ENET_REG_TX_CTL, tx_ctrl_value);
@@ -465,7 +478,7 @@ static s32 nps_enet_open(struct net_device *ndev)
        s32 err;
 
        /* Reset private variables */
-       priv->tx_packet_sent = false;
+       priv->tx_skb = NULL;
        priv->ge_mac_cfg_2_value = 0;
        priv->ge_mac_cfg_3_value = 0;
 
@@ -534,6 +547,11 @@ static netdev_tx_t nps_enet_start_xmit(struct sk_buff *skb,
 
        priv->tx_skb = skb;
 
+       /* Make sure tx_skb is actually written to memory
+        * before the HW is informed and the IRQ is fired.
+        */
+       wmb();
+
        nps_enet_send_frame(ndev, skb);
 
        return NETDEV_TX_OK;
index d0cab600bce8d94bbfde1fbd30c2fca76fe2cb87..3939ca20cc9fa0b01b108504fae363927ca92472 100644 (file)
  * struct nps_enet_priv - Storage of ENET's private information.
  * @regs_base:      Base address of ENET memory-mapped control registers.
  * @irq:            For RX/TX IRQ number.
- * @tx_packet_sent: SW indication if frame is being sent.
  * @tx_skb:         socket buffer of sent frame.
  * @napi:           Structure for NAPI.
  */
 struct nps_enet_priv {
        void __iomem *regs_base;
        s32 irq;
-       bool tx_packet_sent;
        struct sk_buff *tx_skb;
        struct napi_struct napi;
        u32 ge_mac_cfg_2_value;
index 08243c2ff4b4ae3d201c95c0bbc9caa5a7bd776c..2a03857cca18e55adfd5139b3c33755b19b7e3c5 100644 (file)
@@ -1521,9 +1521,15 @@ fec_enet_rx(struct net_device *ndev, int budget)
        struct fec_enet_private *fep = netdev_priv(ndev);
 
        for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
-               clear_bit(queue_id, &fep->work_rx);
-               pkt_received += fec_enet_rx_queue(ndev,
+               int ret;
+
+               ret = fec_enet_rx_queue(ndev,
                                        budget - pkt_received, queue_id);
+
+               if (ret < budget - pkt_received)
+                       clear_bit(queue_id, &fep->work_rx);
+
+               pkt_received += ret;
        }
        return pkt_received;
 }
index b5c6d42daa1208dab6771d0b7578f7cdb24bde02..2664827ddecd969b3e61886c16e7059c74dbf46f 100644 (file)
@@ -68,7 +68,7 @@ config MVNETA
 
 config MVNETA_BM
        tristate
-       default y if MVNETA=y && MVNETA_BM_ENABLE
+       default y if MVNETA=y && MVNETA_BM_ENABLE!=n
        default MVNETA_BM_ENABLE
        select HWBM
        help
index 7fc490225da507de93c66c1b55eba3f0e5295e58..a6d26d351dfc47c777b39c04a44c2f17bca0feab 100644 (file)
@@ -3354,8 +3354,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Enable per-CPU interrupts on the CPU that is
                 * brought up.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_enable,
-                                        pp, true);
+               mvneta_percpu_enable(pp);
 
                /* Enable per-CPU interrupt on the one CPU we care
                 * about.
@@ -3387,8 +3386,7 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                /* Disable per-CPU interrupts on the CPU that is
                 * brought down.
                 */
-               smp_call_function_single(cpu, mvneta_percpu_disable,
-                                        pp, true);
+               mvneta_percpu_disable(pp);
 
                break;
        case CPU_DEAD:
index 7ace07dad6a31d4b18ab5aa1e5335c0727cff596..c442f6ad15fff806af5f747e70d085766dd37ae9 100644 (file)
@@ -979,6 +979,8 @@ static int pxa168_init_phy(struct net_device *dev)
                return 0;
 
        pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+       if (IS_ERR(pep->phy))
+               return PTR_ERR(pep->phy);
        if (!pep->phy)
                return -ENODEV;
 
index b723e3bcab39e83653be5367da514bd876b9d0cd..ca3a38421ee74789abda598bc96c0b904095b8f2 100644 (file)
@@ -707,7 +707,7 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
 
        if (ipv6h->nexthdr == IPPROTO_FRAGMENT || ipv6h->nexthdr == IPPROTO_HOPOPTS)
                return -1;
-       hw_checksum = csum_add(hw_checksum, (__force __wsum)(ipv6h->nexthdr << 8));
+       hw_checksum = csum_add(hw_checksum, (__force __wsum)htons(ipv6h->nexthdr));
 
        csum_pseudo_hdr = csum_partial(&ipv6h->saddr,
                                       sizeof(ipv6h->saddr) + sizeof(ipv6h->daddr), 0);
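
The one-line change above is an endianness fix: the IPv6 pseudo-header carries the next-header value in the low byte of a big-endian 16-bit word (wire bytes { 0x00, nexthdr }), and htons(nexthdr) yields the host-order value of exactly those wire bytes on any architecture, whereas nexthdr << 8 only happens to match on little-endian hosts. A small standalone sketch, using a plain 16-bit load to stand in for the kernel's checksum accumulation:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
    uint8_t nexthdr = 6;   /* IPPROTO_TCP */

    /* The pseudo-header's next-header word on the wire: { 0x00, nexthdr }. */
    uint8_t wire[2] = { 0x00, nexthdr };
    uint16_t as_loaded;
    memcpy(&as_loaded, wire, sizeof(as_loaded));  /* host-order load of wire bytes */

    /* On a big-endian host the shifted form differs from the wire bytes,
     * which is the bug the hunk above fixes. */
    printf("loaded from wire bytes: 0x%04x\n", as_loaded);
    printf("htons(nexthdr)        : 0x%04x\n", htons(nexthdr));
    printf("nexthdr << 8          : 0x%04x\n", nexthdr << 8);
    return 0;
}
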
index 1cf722eba607d82e8e34ff498e1b154f8fc8cc5d..f5c3b9465d8d264570e445d4680f48af239d8b41 100644 (file)
@@ -31,3 +31,10 @@ config MLX5_CORE_EN_DCB
          This flag is depended on the kernel's DCB support.
 
          If unsure, set to Y
+
+config MLX5_CORE_EN_VXLAN
+       bool "VXLAN offloads Support"
+       default y
+       depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m)
+       ---help---
+         Say Y here if you want to use VXLAN offloads in the driver.
index 4fc45ee0c5d165c1b723002cf83472024d1ca393..bf65b71c736083982fa8734c450b92cb2955e2d9 100644 (file)
@@ -6,6 +6,7 @@ mlx5_core-y :=  main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 
 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
                en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
-               en_txrx.o en_clock.o vxlan.o en_tc.o
+               en_txrx.o en_clock.o en_tc.o
 
+mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o
 mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) +=  en_dcbnl.o
index e80ce94b5dcf0e139b629732da3658892ca8590d..24344aafbd36c2b4e78fba478d6413074d0009b5 100644 (file)
@@ -564,9 +564,12 @@ struct mlx5e_priv {
        struct mlx5e_flow_tables   fts;
        struct mlx5e_eth_addr_db   eth_addr;
        struct mlx5e_vlan_db       vlan;
+#ifdef CONFIG_MLX5_CORE_EN_VXLAN
        struct mlx5e_vxlan_db      vxlan;
+#endif
 
        struct mlx5e_params        params;
+       struct workqueue_struct    *wq;
        struct work_struct         update_carrier_work;
        struct work_struct         set_rx_mode_work;
        struct delayed_work        update_stats_work;
index 67d548b70e142a376d0fc802a4576ee965f92520..94fef705890baf3f7d191ce99bf10ec201dbc400 100644 (file)
@@ -262,9 +262,8 @@ static void mlx5e_update_stats_work(struct work_struct *work)
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                mlx5e_update_stats(priv);
-               schedule_delayed_work(dwork,
-                                     msecs_to_jiffies(
-                                             MLX5E_UPDATE_STATS_INTERVAL));
+               queue_delayed_work(priv->wq, dwork,
+                                  msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
 }
@@ -280,7 +279,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
-               schedule_work(&priv->update_carrier_work);
+               queue_work(priv->wq, &priv->update_carrier_work);
                break;
 
        default:
@@ -1505,7 +1504,7 @@ int mlx5e_open_locked(struct net_device *netdev)
        mlx5e_update_carrier(priv);
        mlx5e_timestamp_init(priv);
 
-       schedule_delayed_work(&priv->update_stats_work, 0);
+       queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
 
        return 0;
 
@@ -1961,7 +1960,7 @@ static void mlx5e_set_rx_mode(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static int mlx5e_set_mac(struct net_device *netdev, void *addr)
@@ -1976,7 +1975,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
        ether_addr_copy(netdev->dev_addr, saddr->sa_data);
        netif_addr_unlock_bh(netdev);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return 0;
 }
@@ -2150,6 +2149,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
                                            vf_stats);
 }
 
+#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN)
 static void mlx5e_add_vxlan_port(struct net_device *netdev,
                                 sa_family_t sa_family, __be16 port)
 {
@@ -2158,7 +2158,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
 }
 
 static void mlx5e_del_vxlan_port(struct net_device *netdev,
@@ -2169,7 +2169,7 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
        if (!mlx5e_vxlan_allowed(priv->mdev))
                return;
 
-       mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
+       mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
 }
 
 static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2221,6 +2221,7 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 
        return features;
 }
+#endif
 
 static const struct net_device_ops mlx5e_netdev_ops_basic = {
        .ndo_open                = mlx5e_open,
@@ -2252,9 +2253,11 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
        .ndo_set_features        = mlx5e_set_features,
        .ndo_change_mtu          = mlx5e_change_mtu,
        .ndo_do_ioctl            = mlx5e_ioctl,
+#ifdef CONFIG_MLX5_CORE_EN_VXLAN
        .ndo_add_vxlan_port      = mlx5e_add_vxlan_port,
        .ndo_del_vxlan_port      = mlx5e_del_vxlan_port,
        .ndo_features_check      = mlx5e_features_check,
+#endif
        .ndo_set_vf_mac          = mlx5e_set_vf_mac,
        .ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
        .ndo_get_vf_config       = mlx5e_get_vf_config,
@@ -2498,10 +2501,14 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
 
        priv = netdev_priv(netdev);
 
+       priv->wq = create_singlethread_workqueue("mlx5e");
+       if (!priv->wq)
+               goto err_free_netdev;
+
        err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
        if (err) {
                mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
-               goto err_free_netdev;
+               goto err_destroy_wq;
        }
 
        err = mlx5_core_alloc_pd(mdev, &priv->pdn);
@@ -2580,7 +2587,7 @@ static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
                vxlan_get_rx_port(netdev);
 
        mlx5e_enable_async_events(priv);
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
 
        return priv;
 
@@ -2617,6 +2624,9 @@ err_dealloc_pd:
 err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &priv->cq_uar);
 
+err_destroy_wq:
+       destroy_workqueue(priv->wq);
+
 err_free_netdev:
        free_netdev(netdev);
 
@@ -2630,9 +2640,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
 
        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-       schedule_work(&priv->set_rx_mode_work);
+       queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
-       flush_scheduled_work();
+       flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
                mutex_lock(&priv->state_lock);
@@ -2655,6 +2665,8 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
        mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
        mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
        mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+       cancel_delayed_work_sync(&priv->update_stats_work);
+       destroy_workqueue(priv->wq);
 
        if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
                free_netdev(netdev);
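
The en_main.c hunks above move the driver off the shared system workqueue and onto a private one, so pending work can be flushed and torn down deterministically in mlx5e_destroy_netdev(). A hedged sketch of that lifecycle with placeholder names (foo_priv, foo_update_stats), not the driver's actual code:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct workqueue_struct *wq;
	struct delayed_work update_stats_work;
};

static void foo_update_stats(struct work_struct *work)
{
	struct foo_priv *priv = container_of(to_delayed_work(work),
					     struct foo_priv, update_stats_work);

	/* ... refresh counters ..., then re-arm on the private queue */
	queue_delayed_work(priv->wq, &priv->update_stats_work,
			   msecs_to_jiffies(200));
}

static int foo_probe(struct foo_priv *priv)
{
	priv->wq = create_singlethread_workqueue("foo");
	if (!priv->wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&priv->update_stats_work, foo_update_stats);
	queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
	return 0;
}

static void foo_remove(struct foo_priv *priv)
{
	/* Stop re-arming, drain anything still queued, then free the queue. */
	cancel_delayed_work_sync(&priv->update_stats_work);
	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);
}
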
index 8ba080e441a1863e63d4188820afbd2f1c39bd6e..5ff8af472bf5221e736e5369b4de73bfe996b0e9 100644 (file)
@@ -269,8 +269,10 @@ EXPORT_SYMBOL(mlx5_alloc_map_uar);
 
 void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
 {
-       iounmap(uar->map);
-       iounmap(uar->bf_map);
+       if (uar->map)
+               iounmap(uar->map);
+       else
+               iounmap(uar->bf_map);
        mlx5_cmd_free_uar(mdev, uar->index);
 }
 EXPORT_SYMBOL(mlx5_unmap_free_uar);
index 9f10df25f3cd52d9f631fd1e0963483eb1e7073a..f2fd1ef16da7eba68deb5af1648b7ff28cf7ff44 100644 (file)
@@ -95,21 +95,22 @@ struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port)
        return vxlan;
 }
 
-int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_add_port(struct work_struct *work)
 {
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
        struct mlx5e_vxlan_db *vxlan_db = &priv->vxlan;
+       u16 port = vxlan_work->port;
        struct mlx5e_vxlan *vxlan;
        int err;
 
-       err = mlx5e_vxlan_core_add_port_cmd(priv->mdev, port);
-       if (err)
-               return err;
+       if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
+               goto free_work;
 
        vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL);
-       if (!vxlan) {
-               err = -ENOMEM;
+       if (!vxlan)
                goto err_delete_port;
-       }
 
        vxlan->udp_port = port;
 
@@ -119,13 +120,14 @@ int mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port)
        if (err)
                goto err_free;
 
-       return 0;
+       goto free_work;
 
 err_free:
        kfree(vxlan);
 err_delete_port:
        mlx5e_vxlan_core_del_port_cmd(priv->mdev, port);
-       return err;
+free_work:
+       kfree(vxlan_work);
 }
 
 static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
@@ -145,12 +147,36 @@ static void __mlx5e_vxlan_core_del_port(struct mlx5e_priv *priv, u16 port)
        kfree(vxlan);
 }
 
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port)
+static void mlx5e_vxlan_del_port(struct work_struct *work)
 {
-       if (!mlx5e_vxlan_lookup_port(priv, port))
-               return;
+       struct mlx5e_vxlan_work *vxlan_work =
+               container_of(work, struct mlx5e_vxlan_work, work);
+       struct mlx5e_priv *priv = vxlan_work->priv;
+       u16 port = vxlan_work->port;
 
        __mlx5e_vxlan_core_del_port(priv, port);
+
+       kfree(vxlan_work);
+}
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add)
+{
+       struct mlx5e_vxlan_work *vxlan_work;
+
+       vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
+       if (!vxlan_work)
+               return;
+
+       if (add)
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_port);
+       else
+               INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_port);
+
+       vxlan_work->priv = priv;
+       vxlan_work->port = port;
+       vxlan_work->sa_family = sa_family;
+       queue_work(priv->wq, &vxlan_work->work);
 }
 
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv)
index a01685056ab156201c1945607e34bb7b53771ee3..217ac530a514f2c2a06176ecb9e357a9702855e7 100644 (file)
@@ -39,16 +39,30 @@ struct mlx5e_vxlan {
        u16 udp_port;
 };
 
+struct mlx5e_vxlan_work {
+       struct work_struct      work;
+       struct mlx5e_priv       *priv;
+       sa_family_t             sa_family;
+       u16                     port;
+};
+
 static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
 {
-       return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
+       return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) &&
+               (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
                mlx5_core_is_pf(mdev));
 }
 
+#ifdef CONFIG_MLX5_CORE_EN_VXLAN
 void mlx5e_vxlan_init(struct mlx5e_priv *priv);
-int  mlx5e_vxlan_add_port(struct mlx5e_priv *priv, u16 port);
-void mlx5e_vxlan_del_port(struct mlx5e_priv *priv, u16 port);
-struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
+#else
+static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {}
+static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {}
+#endif
+
+void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
+                           u16 port, int add);
+struct mlx5e_vxlan *mlx5e_vxlan_lookup_port(struct mlx5e_priv *priv, u16 port);
 
 #endif /* __MLX5_VXLAN_H__ */
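
Because ndo_add_vxlan_port()/ndo_del_vxlan_port() run in atomic context, the hunks above defer the firmware commands through mlx5e_vxlan_queue_work(). A minimal, hypothetical sketch of that per-request work-item pattern (foo_work, foo_priv and the handler are illustrative only):

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct foo_priv {
	struct workqueue_struct *wq;	/* private queue created at probe time */
};

struct foo_work {
	struct work_struct	work;
	struct foo_priv		*priv;
	u16			port;
};

static void foo_add_port_work(struct work_struct *work)
{
	struct foo_work *fw = container_of(work, struct foo_work, work);

	/* Process context: may sleep, issue firmware commands, etc. */
	/* ... program fw->port on fw->priv ... */

	kfree(fw);	/* the handler owns and frees the request */
}

/* Callable from atomic context: allocate with GFP_ATOMIC and defer. */
static void foo_queue_add_port(struct foo_priv *priv, u16 port)
{
	struct foo_work *fw = kmalloc(sizeof(*fw), GFP_ATOMIC);

	if (!fw)
		return;

	INIT_WORK(&fw->work, foo_add_port_work);
	fw->priv = priv;
	fw->port = port;
	queue_work(priv->wq, &fw->work);
}
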
index 4afbc3e9e381bffff2bdabad30bee6653ef21eae..668b2f465ca516ac5e810cc5ee8d30ac0a6eee4a 100644 (file)
@@ -2541,11 +2541,11 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
        lag->ref_count++;
        return 0;
 
+err_col_port_enable:
+       mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
 err_col_port_add:
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
-err_col_port_enable:
-       mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
        return err;
 }
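
The mlxsw hunk above swaps two error labels so that the unwind path runs strictly in the reverse order of setup. A generic sketch of that goto-unwind convention, using assumed foo_* helpers rather than the driver's real functions:

/* Assumed setup/teardown helpers, named only for illustration. */
int foo_step_a(void *ctx);
int foo_step_b(void *ctx);
int foo_step_c(void *ctx);
void foo_undo_a(void *ctx);
void foo_undo_b(void *ctx);

static int foo_setup(void *ctx)
{
	int err;

	err = foo_step_a(ctx);
	if (err)
		return err;

	err = foo_step_b(ctx);
	if (err)
		goto err_step_b;

	err = foo_step_c(ctx);
	if (err)
		goto err_step_c;

	return 0;

/* Each label undoes only what succeeded before the failing step,
 * so the labels appear in reverse order of the setup calls.
 */
err_step_c:
	foo_undo_b(ctx);
err_step_b:
	foo_undo_a(ctx);
	return err;
}
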
 
index e1c74efff51ae16e585188d7d3fdbacbe071bee5..9cd6f472234ad2cd5c7e0a70e27e415a46e913c1 100644 (file)
@@ -214,7 +214,15 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
        mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
                            table_type, range, local_port, set);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+       if (err)
+               goto err_flood_bm_set;
+       else
+               goto buffer_out;
 
+err_flood_bm_set:
+       mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
+                           table_type, range, local_port, !set);
+       mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
 buffer_out:
        kfree(sftr_pl);
        return err;
index 270c9eeb7ab622955d237ae9cd96cadc5caa5f3a..6d1a956e3f779d57e83a8fd40a5136919231df2c 100644 (file)
@@ -2668,9 +2668,9 @@ static int myri10ge_close(struct net_device *dev)
 
        del_timer_sync(&mgp->watchdog_timer);
        mgp->running = MYRI10GE_ETH_STOPPING;
-       local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
        for (i = 0; i < mgp->num_slices; i++) {
                napi_disable(&mgp->ss[i].napi);
+               local_bh_disable(); /* myri10ge_ss_lock_napi needs this */
                /* Lock the slice to prevent the busy_poll handler from
                 * accessing it.  Later when we bring the NIC up, myri10ge_open
                 * resets the slice including this lock.
@@ -2679,8 +2679,8 @@ static int myri10ge_close(struct net_device *dev)
                        pr_info("Slice %d locked\n", i);
                        mdelay(1);
                }
+               local_bh_enable();
        }
-       local_bh_enable();
        netif_carrier_off(dev);
 
        netif_tx_stop_all_queues(dev);
index db80eb1c6d4fc5ebccea52aa86e87a7578ead04f..2b10f1bcd1517458b2cfcb22e6ce4677995181a8 100644 (file)
@@ -1015,20 +1015,24 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
 {
        int i, v, addr;
        __le32 *ptr32;
+       int ret;
 
        addr = base;
        ptr32 = buf;
        for (i = 0; i < size / sizeof(u32); i++) {
-               if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-                       return -1;
+               ret = netxen_rom_fast_read(adapter, addr, &v);
+               if (ret)
+                       return ret;
+
                *ptr32 = cpu_to_le32(v);
                ptr32++;
                addr += sizeof(u32);
        }
        if ((char *)buf + size > (char *)ptr32) {
                __le32 local;
-               if (netxen_rom_fast_read(adapter, addr, &v) == -1)
-                       return -1;
+               ret = netxen_rom_fast_read(adapter, addr, &v);
+               if (ret)
+                       return ret;
                local = cpu_to_le32(v);
                memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
        }
@@ -1940,7 +1944,7 @@ void netxen_nic_set_link_parameters(struct netxen_adapter *adapter)
                                if (adapter->phy_read &&
                                    adapter->phy_read(adapter,
                                                      NETXEN_NIU_GB_MII_MGMT_ADDR_AUTONEG,
-                                                     &autoneg) != 0)
+                                                     &autoneg) == 0)
                                        adapter->link_autoneg = autoneg;
                        } else
                                goto link_down;
index fd362b6923f48b9fff5c55fdcf7fd241d94ef9aa..9c6eed9b45f7ed78eab2bccb459763a28643ddce 100644 (file)
@@ -852,7 +852,8 @@ netxen_check_options(struct netxen_adapter *adapter)
        ptr32 = (__le32 *)&serial_num;
        offset = NX_FW_SERIAL_NUM_OFFSET;
        for (i = 0; i < 8; i++) {
-               if (netxen_rom_fast_read(adapter, offset, &val) == -1) {
+               err = netxen_rom_fast_read(adapter, offset, &val);
+               if (err) {
                        dev_err(&pdev->dev, "error reading board info\n");
                        adapter->driver_mismatch = 1;
                        return;
index 7869465435fa81e9c9ebd9e5ae780b01efb20087..12f6615797ded20fe3dc8895d32cb2fcb5e93071 100644 (file)
@@ -421,7 +421,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
        u8 xmit_type;
        u16 idx;
        u16 hlen;
-       bool data_split;
+       bool data_split = false;
 
        /* Get tx-queue context and netdev index */
        txq_index = skb_get_queue_mapping(skb);
@@ -1938,8 +1938,6 @@ static struct qede_dev *qede_alloc_etherdev(struct qed_dev *cdev,
        edev->q_num_rx_buffers = NUM_RX_BDS_DEF;
        edev->q_num_tx_buffers = NUM_TX_BDS_DEF;
 
-       DP_INFO(edev, "Allocated netdev with 64 tx queues and 64 rx queues\n");
-
        SET_NETDEV_DEV(ndev, &pdev->dev);
 
        memset(&edev->stats, 0, sizeof(edev->stats));
@@ -2090,9 +2088,9 @@ static void qede_update_pf_params(struct qed_dev *cdev)
 {
        struct qed_pf_params pf_params;
 
-       /* 16 rx + 16 tx */
+       /* 64 rx + 64 tx */
        memset(&pf_params, 0, sizeof(struct qed_pf_params));
-       pf_params.eth_pf_params.num_cons = 32;
+       pf_params.eth_pf_params.num_cons = 128;
        qed_ops->common->update_pf_params(cdev, &pf_params);
 }
 
index cda9e604a95f68d61227808779a93abf1c400342..0844b7c7576709c8a271142fd85f41b8007fc108 100644 (file)
@@ -1417,6 +1417,7 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
        struct qlcnic_fw_dump *fw_dump = &ahw->fw_dump;
        struct pci_dev *pdev = adapter->pdev;
        bool extended = false;
+       int ret;
 
        prev_version = adapter->fw_version;
        current_version = qlcnic_83xx_get_fw_version(adapter);
@@ -1427,8 +1428,11 @@ void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *adapter)
                if (qlcnic_83xx_md_check_extended_dump_capability(adapter))
                        extended = !qlcnic_83xx_extend_md_capab(adapter);
 
-               if (!qlcnic_fw_cmd_get_minidump_temp(adapter))
-                       dev_info(&pdev->dev, "Supports FW dump capability\n");
+               ret = qlcnic_fw_cmd_get_minidump_temp(adapter);
+               if (ret)
+                       return;
+
+               dev_info(&pdev->dev, "Supports FW dump capability\n");
 
                /* Once we have minidump template with extended iSCSI dump
                 * capability, update the minidump capture mask to 0x1f as
index 9e2a0bd8f5a88803b4d1e260e18ffff9f119d541..4277d0c12101fef796f7826508edec872f7a7a68 100644 (file)
@@ -1506,6 +1506,8 @@ static int ravb_close(struct net_device *ndev)
                priv->phydev = NULL;
        }
 
+       if (priv->chip_id == RCAR_GEN3)
+               free_irq(priv->emac_irq, ndev);
        free_irq(ndev->irq, ndev);
 
        napi_disable(&priv->napi[RAVB_NC]);
index 98d33d462c6ca0f6b78499cde0b8d3e2b51f8b07..1681084cc96f8270d36e55d9ede099a58632566a 100644 (file)
@@ -1920,6 +1920,10 @@ static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
                return 0;
        }
 
+       if (nic_data->datapath_caps &
+           1 << MC_CMD_GET_CAPABILITIES_OUT_RX_RSS_LIMITED_LBN)
+               return -EOPNOTSUPP;
+
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
                       nic_data->vport_id);
        MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
@@ -2923,9 +2927,16 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                                      bool replacing)
 {
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
+       u32 flags = spec->flags;
 
        memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
 
+       /* Remove RSS flag if we don't have an RSS context. */
+       if (flags & EFX_FILTER_FLAG_RX_RSS &&
+           spec->rss_context == EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+           nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+               flags &= ~EFX_FILTER_FLAG_RX_RSS;
+
        if (replacing) {
                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
                               MC_CMD_FILTER_OP_IN_OP_REPLACE);
@@ -2985,10 +2996,10 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
                       spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
                       0 : spec->dmaq_id);
        MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
-                      (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+                      (flags & EFX_FILTER_FLAG_RX_RSS) ?
                       MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
                       MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
-       if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+       if (flags & EFX_FILTER_FLAG_RX_RSS)
                MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
                               spec->rss_context !=
                               EFX_FILTER_RSS_CONTEXT_DEFAULT ?
index bbb77cd8ad6776df47d01549cceb7165735c15f0..e2fcdf1eec441939af5e59dc2a581de2ebd02694 100644 (file)
@@ -367,7 +367,6 @@ struct cpsw_priv {
        spinlock_t                      lock;
        struct platform_device          *pdev;
        struct net_device               *ndev;
-       struct device_node              *phy_node;
        struct napi_struct              napi_rx;
        struct napi_struct              napi_tx;
        struct device                   *dev;
@@ -1148,25 +1147,34 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
                cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
                                   1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
 
-       if (priv->phy_node)
-               slave->phy = of_phy_connect(priv->ndev, priv->phy_node,
+       if (slave->data->phy_node) {
+               slave->phy = of_phy_connect(priv->ndev, slave->data->phy_node,
                                 &cpsw_adjust_link, 0, slave->data->phy_if);
-       else
+               if (!slave->phy) {
+                       dev_err(priv->dev, "phy \"%s\" not found on slave %d\n",
+                               slave->data->phy_node->full_name,
+                               slave->slave_num);
+                       return;
+               }
+       } else {
                slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
                                 &cpsw_adjust_link, slave->data->phy_if);
-       if (IS_ERR(slave->phy)) {
-               dev_err(priv->dev, "phy %s not found on slave %d\n",
-                       slave->data->phy_id, slave->slave_num);
-               slave->phy = NULL;
-       } else {
-               phy_attached_info(slave->phy);
+               if (IS_ERR(slave->phy)) {
+                       dev_err(priv->dev,
+                               "phy \"%s\" not found on slave %d, err %ld\n",
+                               slave->data->phy_id, slave->slave_num,
+                               PTR_ERR(slave->phy));
+                       slave->phy = NULL;
+                       return;
+               }
+       }
 
-               phy_start(slave->phy);
+       phy_attached_info(slave->phy);
 
-               /* Configure GMII_SEL register */
-               cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface,
-                            slave->slave_num);
-       }
+       phy_start(slave->phy);
+
+       /* Configure GMII_SEL register */
+       cpsw_phy_sel(&priv->pdev->dev, slave->phy->interface, slave->slave_num);
 }
 
 static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
@@ -1940,12 +1948,11 @@ static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
        slave->port_vlan = data->dual_emac_res_vlan;
 }
 
-static int cpsw_probe_dt(struct cpsw_priv *priv,
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
                         struct platform_device *pdev)
 {
        struct device_node *node = pdev->dev.of_node;
        struct device_node *slave_node;
-       struct cpsw_platform_data *data = &priv->data;
        int i = 0, ret;
        u32 prop;
 
@@ -2033,25 +2040,21 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
                if (strcmp(slave_node->name, "slave"))
                        continue;
 
-               priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+               slave_data->phy_node = of_parse_phandle(slave_node,
+                                                       "phy-handle", 0);
                parp = of_get_property(slave_node, "phy_id", &lenp);
-               if (of_phy_is_fixed_link(slave_node)) {
-                       struct device_node *phy_node;
-                       struct phy_device *phy_dev;
-
+               if (slave_data->phy_node) {
+                       dev_dbg(&pdev->dev,
+                               "slave[%d] using phy-handle=\"%s\"\n",
+                               i, slave_data->phy_node->full_name);
+               } else if (of_phy_is_fixed_link(slave_node)) {
                        /* In the case of a fixed PHY, the DT node associated
                         * to the PHY is the Ethernet MAC DT node.
                         */
                        ret = of_phy_register_fixed_link(slave_node);
                        if (ret)
                                return ret;
-                       phy_node = of_node_get(slave_node);
-                       phy_dev = of_phy_find_device(phy_node);
-                       if (!phy_dev)
-                               return -ENODEV;
-                       snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
-                                PHY_ID_FMT, phy_dev->mdio.bus->id,
-                                phy_dev->mdio.addr);
+                       slave_data->phy_node = of_node_get(slave_node);
                } else if (parp) {
                        u32 phyid;
                        struct device_node *mdio_node;
@@ -2072,7 +2075,9 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
                        snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                                 PHY_ID_FMT, mdio->name, phyid);
                } else {
-                       dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
+                       dev_err(&pdev->dev,
+                               "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+                               i);
                        goto no_phy_slave;
                }
                slave_data->phy_if = of_get_phy_mode(slave_node);
@@ -2275,7 +2280,7 @@ static int cpsw_probe(struct platform_device *pdev)
        /* Select default pin state */
        pinctrl_pm_select_default_state(&pdev->dev);
 
-       if (cpsw_probe_dt(priv, pdev)) {
+       if (cpsw_probe_dt(&priv->data, pdev)) {
                dev_err(&pdev->dev, "cpsw: platform data missing\n");
                ret = -ENODEV;
                goto clean_runtime_disable_ret;
index 442a7038e660c877b1cc7509efab7df214a25a87..e50afd1b2eda09d87a94b100646d774de52f32b5 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/phy.h>
 
 struct cpsw_slave_data {
+       struct device_node *phy_node;
        char            phy_id[MII_BUS_ID_SIZE];
        int             phy_if;
        u8              mac_addr[ETH_ALEN];
index 58d58f002559f627040a8bfb3fa452f671c62e44..f56d66e6ec155194e1f432ee42950d9db915c697 100644 (file)
@@ -1512,7 +1512,10 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
 
        /* TODO: Add phy read and write and private statistics get feature */
 
-       return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+       if (priv->phydev)
+               return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+       else
+               return -EOPNOTSUPP;
 }
 
 static int match_first_device(struct device *dev, void *data)
index 13214a6492ac5b1eced4d39c21b7736f5dcf19d4..743b18266a7c2b2be2b33f2d3f7fea0f9b80e361 100644 (file)
@@ -1622,7 +1622,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
                        continue;
 
                /* copy hw scan info */
-               memcpy(target->hwinfo, scan_info, scan_info->size);
+               memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
                target->essid_len = strnlen(scan_info->essid,
                                            sizeof(scan_info->essid));
                target->rate_len = 0;
index bc168894bda32d0723f7d77aa475f59bd2c7d75a..7b0a644122ebf064c6058e2aa5bb453cec3d57d1 100644 (file)
@@ -504,8 +504,6 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
        int gh_len;
        int err = -ENOSYS;
 
-       udp_tunnel_gro_complete(skb, nhoff);
-
        gh = (struct genevehdr *)(skb->data + nhoff);
        gh_len = geneve_hlen(gh);
        type = gh->proto_type;
@@ -516,6 +514,9 @@ static int geneve_gro_complete(struct sk_buff *skb, int nhoff,
                err = ptype->callbacks.gro_complete(skb, nhoff + gh_len);
 
        rcu_read_unlock();
+
+       skb_set_inner_mac_header(skb, nhoff + gh_len);
+
        return err;
 }
 
index c6385617bfb29a38565e9708953c3024ae3af994..92eaab95ae2b1407b2177f96c63a17beb58753b9 100644 (file)
@@ -85,7 +85,7 @@ struct gcm_iv {
  * @tfm: crypto struct, key storage
  */
 struct macsec_key {
-       u64 id;
+       u8 id[MACSEC_KEYID_LEN];
        struct crypto_aead *tfm;
 };
 
@@ -1529,7 +1529,8 @@ static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
        [MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
        [MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
        [MACSEC_SA_ATTR_PN] = { .type = NLA_U32 },
-       [MACSEC_SA_ATTR_KEYID] = { .type = NLA_U64 },
+       [MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
+                                  .len = MACSEC_KEYID_LEN, },
        [MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
                                 .len = MACSEC_MAX_KEY_LEN, },
 };
@@ -1576,6 +1577,9 @@ static bool validate_add_rxsa(struct nlattr **attrs)
                        return false;
        }
 
+       if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+               return false;
+
        return true;
 }
 
@@ -1641,7 +1645,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
        if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
                rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
 
-       rx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+       nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
        rx_sa->sc = rx_sc;
        rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);
 
@@ -1722,6 +1726,9 @@ static bool validate_add_txsa(struct nlattr **attrs)
                        return false;
        }
 
+       if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
+               return false;
+
        return true;
 }
 
@@ -1777,7 +1784,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
                return -ENOMEM;
        }
 
-       tx_sa->key.id = nla_get_u64(tb_sa[MACSEC_SA_ATTR_KEYID]);
+       nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEY], MACSEC_KEYID_LEN);
 
        spin_lock_bh(&tx_sa->lock);
        tx_sa->next_pn = nla_get_u32(tb_sa[MACSEC_SA_ATTR_PN]);
@@ -2318,7 +2325,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
                if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
                    nla_put_u32(skb, MACSEC_SA_ATTR_PN, tx_sa->next_pn) ||
-                   nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, tx_sa->key.id) ||
+                   nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
                    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
                        nla_nest_cancel(skb, txsa_nest);
                        nla_nest_cancel(skb, txsa_list);
@@ -2419,7 +2426,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
 
                        if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
                            nla_put_u32(skb, MACSEC_SA_ATTR_PN, rx_sa->next_pn) ||
-                           nla_put_u64(skb, MACSEC_SA_ATTR_KEYID, rx_sa->key.id) ||
+                           nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
                            nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
                                nla_nest_cancel(skb, rxsa_nest);
                                nla_nest_cancel(skb, rxsc_nest);
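
The macsec hunks above change the key identifier from a u64 attribute to a fixed-length NLA_BINARY one, which requires an explicit exact-length check before copying. A hedged sketch of that validate-then-copy pattern with hypothetical names (FOO_ATTR_ID, FOO_ID_LEN):

#include <linux/errno.h>
#include <linux/types.h>
#include <net/netlink.h>

#define FOO_ID_LEN	16	/* illustrative, like MACSEC_KEYID_LEN */

enum { FOO_ATTR_UNSPEC, FOO_ATTR_ID, __FOO_ATTR_MAX };
#define FOO_ATTR_MAX	(__FOO_ATTR_MAX - 1)

static const struct nla_policy foo_policy[FOO_ATTR_MAX + 1] = {
	/* NLA_BINARY with .len only bounds the maximum length ... */
	[FOO_ATTR_ID] = { .type = NLA_BINARY, .len = FOO_ID_LEN },
};

static int foo_set_id(u8 id[FOO_ID_LEN], struct nlattr **attrs)
{
	/* ... so the exact length is still checked before nla_memcpy(). */
	if (!attrs[FOO_ATTR_ID] ||
	    nla_len(attrs[FOO_ATTR_ID]) != FOO_ID_LEN)
		return -EINVAL;

	nla_memcpy(id, attrs[FOO_ATTR_ID], FOO_ID_LEN);
	return 0;
}

On the dump side the same attribute would go out with nla_put(skb, FOO_ATTR_ID, FOO_ID_LEN, id), matching the nla_put() calls in the hunks above.
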
index 95394edd1ed528fc9cf94fb39aadeaa28c47fb26..9a35aa4623142795cbcf6311e4d05a8e13e5776c 100644 (file)
@@ -373,7 +373,7 @@ static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
                        goto wake_up;
                }
 
-               kfree_skb(skb);
+               consume_skb(skb);
                while (segs) {
                        struct sk_buff *nskb = segs->next;
 
index b3ffaee3085885e02a2556f5d34517320a875f1b..f279a897a5c7fe0e875fb8b058f4c00ae3059f62 100644 (file)
@@ -359,27 +359,25 @@ static void at803x_link_change_notify(struct phy_device *phydev)
         * in the FIFO. In such cases, the FIFO enters an error mode it
         * cannot recover from by software.
         */
-       if (phydev->drv->phy_id == ATH8030_PHY_ID) {
-               if (phydev->state == PHY_NOLINK) {
-                       if (priv->gpiod_reset && !priv->phy_reset) {
-                               struct at803x_context context;
-
-                               at803x_context_save(phydev, &context);
-
-                               gpiod_set_value(priv->gpiod_reset, 1);
-                               msleep(1);
-                               gpiod_set_value(priv->gpiod_reset, 0);
-                               msleep(1);
-
-                               at803x_context_restore(phydev, &context);
-
-                               phydev_dbg(phydev, "%s(): phy was reset\n",
-                                          __func__);
-                               priv->phy_reset = true;
-                       }
-               } else {
-                       priv->phy_reset = false;
+       if (phydev->state == PHY_NOLINK) {
+               if (priv->gpiod_reset && !priv->phy_reset) {
+                       struct at803x_context context;
+
+                       at803x_context_save(phydev, &context);
+
+                       gpiod_set_value(priv->gpiod_reset, 1);
+                       msleep(1);
+                       gpiod_set_value(priv->gpiod_reset, 0);
+                       msleep(1);
+
+                       at803x_context_restore(phydev, &context);
+
+                       phydev_dbg(phydev, "%s(): phy was reset\n",
+                                  __func__);
+                       priv->phy_reset = true;
                }
+       } else {
+               priv->phy_reset = false;
        }
 }
 
@@ -391,7 +389,6 @@ static struct phy_driver at803x_driver[] = {
        .phy_id_mask            = 0xffffffef,
        .probe                  = at803x_probe,
        .config_init            = at803x_config_init,
-       .link_change_notify     = at803x_link_change_notify,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
@@ -427,7 +424,6 @@ static struct phy_driver at803x_driver[] = {
        .phy_id_mask            = 0xffffffef,
        .probe                  = at803x_probe,
        .config_init            = at803x_config_init,
-       .link_change_notify     = at803x_link_change_notify,
        .set_wol                = at803x_set_wol,
        .get_wol                = at803x_get_wol,
        .suspend                = at803x_suspend,
index 5590b9c182c967d80a186256162f30690c707506..445fc5aef308620b277fe0aaff268dff0a3d4e41 100644 (file)
@@ -790,9 +790,11 @@ void phy_start(struct phy_device *phydev)
                break;
        case PHY_HALTED:
                /* make sure interrupts are re-enabled for the PHY */
-               err = phy_enable_interrupts(phydev);
-               if (err < 0)
-                       break;
+               if (phydev->irq != PHY_POLL) {
+                       err = phy_enable_interrupts(phydev);
+                       if (err < 0)
+                               break;
+               }
 
                phydev->state = PHY_RESUMING;
                do_resume = true;
index f20890ee03f33368fd68c6b5fb82f8fd76fa4310..f64778ad97535118d5b0705c1a5afa04ab57677c 100644 (file)
@@ -269,6 +269,7 @@ struct skb_data {           /* skb->cb is one of these */
        struct lan78xx_net *dev;
        enum skb_state state;
        size_t length;
+       int num_of_packet;
 };
 
 struct usb_context {
@@ -1803,7 +1804,34 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
 
 static void lan78xx_link_status_change(struct net_device *net)
 {
-       /* nothing to do */
+       struct phy_device *phydev = net->phydev;
+       int ret, temp;
+
+       /* At forced 100 F/H mode, chip may fail to set mode correctly
+        * when cable is switched between long(~50+m) and short one.
+        * As workaround, set to 10 before setting to 100
+        * at forced 100 F/H mode.
+        */
+       if (!phydev->autoneg && (phydev->speed == 100)) {
+               /* disable phy interrupt */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_;
+               ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+
+               temp = phy_read(phydev, MII_BMCR);
+               temp &= ~(BMCR_SPEED100 | BMCR_SPEED1000);
+               phy_write(phydev, MII_BMCR, temp); /* set to 10 first */
+               temp |= BMCR_SPEED100;
+               phy_write(phydev, MII_BMCR, temp); /* set to 100 later */
+
+               /* clear pending interrupt generated while workaround */
+               temp = phy_read(phydev, LAN88XX_INT_STS);
+
+               /* enable phy interrupt back */
+               temp = phy_read(phydev, LAN88XX_INT_MASK);
+               temp |= LAN88XX_INT_MASK_MDINTPIN_EN_;
+               ret = phy_write(phydev, LAN88XX_INT_MASK, temp);
+       }
 }
 
 static int lan78xx_phy_init(struct lan78xx_net *dev)
@@ -2464,7 +2492,7 @@ static void tx_complete(struct urb *urb)
        struct lan78xx_net *dev = entry->dev;
 
        if (urb->status == 0) {
-               dev->net->stats.tx_packets++;
+               dev->net->stats.tx_packets += entry->num_of_packet;
                dev->net->stats.tx_bytes += entry->length;
        } else {
                dev->net->stats.tx_errors++;
@@ -2681,10 +2709,11 @@ void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
                return;
        }
 
-       skb->protocol = eth_type_trans(skb, dev->net);
        dev->net->stats.rx_packets++;
        dev->net->stats.rx_bytes += skb->len;
 
+       skb->protocol = eth_type_trans(skb, dev->net);
+
        netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
                  skb->len + sizeof(struct ethhdr), skb->protocol);
        memset(skb->cb, 0, sizeof(struct skb_data));
@@ -2934,13 +2963,16 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
 
        skb_totallen = 0;
        pkt_cnt = 0;
+       count = 0;
+       length = 0;
        for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
                if (skb_is_gso(skb)) {
                        if (pkt_cnt) {
                                /* handle previous packets first */
                                break;
                        }
-                       length = skb->len;
+                       count = 1;
+                       length = skb->len - TX_OVERHEAD;
                        skb2 = skb_dequeue(tqp);
                        goto gso_skb;
                }
@@ -2961,14 +2993,13 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
        for (count = pos = 0; count < pkt_cnt; count++) {
                skb2 = skb_dequeue(tqp);
                if (skb2) {
+                       length += (skb2->len - TX_OVERHEAD);
                        memcpy(skb->data + pos, skb2->data, skb2->len);
                        pos += roundup(skb2->len, sizeof(u32));
                        dev_kfree_skb(skb2);
                }
        }
 
-       length = skb_totallen;
-
 gso_skb:
        urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urb) {
@@ -2980,6 +3011,7 @@ gso_skb:
        entry->urb = urb;
        entry->dev = dev;
        entry->length = length;
+       entry->num_of_packet = count;
 
        spin_lock_irqsave(&dev->txq.lock, flags);
        ret = usb_autopm_get_interface_async(dev->intf);
index f840802159158b56a5b840d7abb83152fcd0c83f..82129eef77748f25ffe05f4754f4f6df81e3e8cd 100644 (file)
@@ -411,7 +411,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
        int ret;
 
        read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart);
-       data[0] = 0xc9;
+       data[0] = 0xc8; /* TX & RX enable, append status, no CRC */
        data[1] = 0;
        if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL))
                data[1] |= 0x20;        /* set full duplex */
@@ -497,7 +497,7 @@ static void read_bulk_callback(struct urb *urb)
                pkt_len = buf[count - 3] << 8;
                pkt_len += buf[count - 4];
                pkt_len &= 0xfff;
-               pkt_len -= 8;
+               pkt_len -= 4;
        }
 
        /*
@@ -528,7 +528,7 @@ static void read_bulk_callback(struct urb *urb)
 goon:
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
        rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
        if (rx_status == -ENODEV)
@@ -569,7 +569,7 @@ static void rx_fixup(unsigned long data)
        }
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
 try_again:
        status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC);
@@ -823,7 +823,7 @@ static int pegasus_open(struct net_device *net)
 
        usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb,
                          usb_rcvbulkpipe(pegasus->usb, 1),
-                         pegasus->rx_skb->data, PEGASUS_MTU + 8,
+                         pegasus->rx_skb->data, PEGASUS_MTU,
                          read_bulk_callback, pegasus);
        if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
                if (res == -ENODEV)
index 30033dbe666263f12ed05cbe7d2fe1e924b156e4..c369db99c005bd69e73217a60bfa87458ebeec9b 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc75xx.h"
 
 #define SMSC_CHIPNAME                  "smsc75xx"
@@ -761,6 +762,15 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 
 static void smsc75xx_init_mac_address(struct usbnet *dev)
 {
+       const u8 *mac_addr;
+
+       /* maybe the boot loader passed the MAC address in devicetree */
+       mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+       if (mac_addr) {
+               memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+               return;
+       }
+
        /* try reading mac address from EEPROM */
        if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
                        dev->net->dev_addr) == 0) {
@@ -772,7 +782,7 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
                }
        }
 
-       /* no eeprom, or eeprom values are invalid. generate random MAC */
+       /* no useful static MAC address found. generate a random one */
        eth_hw_addr_random(dev->net);
        netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
index 66b3ab9f614eb07edb05757ee333e998f7841f7d..2edc2bc6d1b9fb3201a5f4d564dffed93e30bddc 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/of_net.h>
 #include "smsc95xx.h"
 
 #define SMSC_CHIPNAME                  "smsc95xx"
@@ -765,6 +766,15 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
 
 static void smsc95xx_init_mac_address(struct usbnet *dev)
 {
+       const u8 *mac_addr;
+
+       /* maybe the boot loader passed the MAC address in devicetree */
+       mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+       if (mac_addr) {
+               memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+               return;
+       }
+
        /* try reading mac address from EEPROM */
        if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
                        dev->net->dev_addr) == 0) {
@@ -775,7 +785,7 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
                }
        }
 
-       /* no eeprom, or eeprom values are invalid. generate random MAC */
+       /* no useful static MAC address found. generate a random one */
        eth_hw_addr_random(dev->net);
        netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
 }
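
Both the smsc75xx and smsc95xx hunks above insert device tree as the first source in the MAC address fallback chain (DT, then EEPROM, then a random address). A condensed, hypothetical sketch of that ordering (foo_read_eeprom_mac stands in for the driver's EEPROM helper):

#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/string.h>
#include <linux/usb/usbnet.h>

/* Assumed driver helper: returns 0 and fills addr[ETH_ALEN] on success. */
int foo_read_eeprom_mac(struct usbnet *dev, u8 *addr);

static void foo_init_mac_address(struct usbnet *dev)
{
	const u8 *mac = of_get_mac_address(dev->udev->dev.of_node);

	if (mac) {				/* 1) device tree */
		memcpy(dev->net->dev_addr, mac, ETH_ALEN);
		return;
	}

	if (foo_read_eeprom_mac(dev, dev->net->dev_addr) == 0 &&
	    is_valid_ether_addr(dev->net->dev_addr))
		return;				/* 2) EEPROM */

	eth_hw_addr_random(dev->net);		/* 3) random fallback */
}
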
index 1c0fa364323e280f2ed25c86bdcb8f3508a26c34..8ac261ab7d7dd5173afb111850aafe804a3a874d 100644 (file)
@@ -616,8 +616,9 @@ out:
 static int vxlan_gro_complete(struct sk_buff *skb, int nhoff,
                              struct udp_offload *uoff)
 {
-       udp_tunnel_gro_complete(skb, nhoff);
-
+       /* Sets 'skb->inner_mac_header' since we are always called with
+        * 'skb->encapsulation' set.
+        */
        return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
 }
 
index 8f8793004b9f021c7f689e13dabcf32f10e2c3a0..1b271b99c49eee91a96639ee019d370f61d0cda0 100644 (file)
@@ -274,6 +274,9 @@ void ar5008_hw_cmn_spur_mitigate(struct ath_hw *ah,
        };
        static const int inc[4] = { 0, 100, 0, 0 };
 
+       memset(&mask_m, 0, sizeof(int8_t) * 123);
+       memset(&mask_p, 0, sizeof(int8_t) * 123);
+
        cur_bin = -6000;
        upper = bin + 100;
        lower = bin - 100;
@@ -424,14 +427,9 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
        int tmp, new;
        int i;
 
-       int8_t mask_m[123];
-       int8_t mask_p[123];
        int cur_bb_spur;
        bool is2GHz = IS_CHAN_2GHZ(chan);
 
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
        for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
                cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
                if (AR_NO_SPUR == cur_bb_spur)
index db6624527d9959d3ffd10a32f3fa76cb0b6dff83..53d7445a5d12c1edaaf012e4e853560ec1f75e39 100644 (file)
@@ -178,14 +178,9 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
        int i;
        struct chan_centers centers;
 
-       int8_t mask_m[123];
-       int8_t mask_p[123];
        int cur_bb_spur;
        bool is2GHz = IS_CHAN_2GHZ(chan);
 
-       memset(&mask_m, 0, sizeof(int8_t) * 123);
-       memset(&mask_p, 0, sizeof(int8_t) * 123);
-
        ath9k_hw_get_channel_centers(ah, chan, &centers);
        freq = centers.synth_center;
 
index 97be104d12030f194a07ab67b5f0e0b8ee15f3e6..b5c57eebf995301e023e683fb672afb6876924b6 100644 (file)
@@ -93,7 +93,7 @@
 #define IWL8260_SMEM_OFFSET            0x400000
 #define IWL8260_SMEM_LEN               0x68000
 
-#define IWL8000_FW_PRE "iwlwifi-8000"
+#define IWL8000_FW_PRE "iwlwifi-8000C-"
 #define IWL8000_MODULE_FIRMWARE(api) \
        IWL8000_FW_PRE "-" __stringify(api) ".ucode"
 
index f899666acb41f44f56a326f5b0bfcff6dce88bde..9e45bf9c60715c523a2af0729713c8726783fa2c 100644 (file)
@@ -238,19 +238,6 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
        snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
                 name_pre, tag);
 
-       /*
-        * Starting 8000B - FW name format has changed. This overwrites the
-        * previous name and uses the new format.
-        */
-       if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
-               char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
-
-               if (rev_step != 'A')
-                       snprintf(drv->firmware_name,
-                                sizeof(drv->firmware_name), "%s%c-%s.ucode",
-                                name_pre, rev_step, tag);
-       }
-
        IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
                       (drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
                                ? "EXPERIMENTAL " : "",
@@ -1060,11 +1047,18 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
                return -EINVAL;
        }
 
-       if (WARN(fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-                !gscan_capa,
-                "GSCAN is supported but capabilities TLV is unavailable\n"))
+       /*
+        * If ucode advertises that it supports GSCAN but GSCAN
+        * capabilities TLV is not present, or if it has an old format,
+        * warn and continue without GSCAN.
+        */
+       if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
+           !gscan_capa) {
+               IWL_DEBUG_INFO(drv,
+                              "GSCAN is supported but capabilities TLV is unavailable\n");
                __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
                            capa->_capa);
+       }
 
        return 0;
 
index 4856eac120f60d5eff2e9707e828e541761a76ae..6938cd37be57c6e48ae9efc4d559b58845fd705c 100644 (file)
@@ -526,7 +526,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
                file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
 
        /* Make room for fw's virtual image pages, if it exists */
-       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size)
+       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+           mvm->fw_paging_db[0].fw_paging_block)
                file_len += mvm->num_of_paging_blk *
                        (sizeof(*dump_data) +
                         sizeof(struct iwl_fw_error_dump_paging) +
@@ -643,7 +644,8 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
        }
 
        /* Dump fw's virtual image */
-       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size) {
+       if (mvm->fw->img[mvm->cur_ucode].paging_mem_size &&
+           mvm->fw_paging_db[0].fw_paging_block) {
                for (i = 1; i < mvm->num_of_paging_blk + 1; i++) {
                        struct iwl_fw_error_dump_paging *paging;
                        struct page *pages =
index 594cd0dc7df937d8b495f9c2864de13ed51f795d..09d895fafaf29dbf2de1cb824c0c02ae068acc96 100644 (file)
@@ -144,9 +144,11 @@ void iwl_free_fw_paging(struct iwl_mvm *mvm)
 
                __free_pages(mvm->fw_paging_db[i].fw_paging_block,
                             get_order(mvm->fw_paging_db[i].fw_paging_size));
+               mvm->fw_paging_db[i].fw_paging_block = NULL;
        }
        kfree(mvm->trans->paging_download_buf);
        mvm->trans->paging_download_buf = NULL;
+       mvm->trans->paging_db = NULL;
 
        memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
 }
index 75870e68a7c344285ce4652318c721648a756121..34731e29c58946d22ebd02e9195ebaf6a9c247e2 100644 (file)
@@ -105,6 +105,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
                        struct iwl_tx_cmd *tx_cmd,
                        struct ieee80211_tx_info *info, u8 sta_id)
 {
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (void *)skb->data;
        __le16 fc = hdr->frame_control;
        u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
@@ -185,7 +186,7 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
        tx_cmd->tx_flags = cpu_to_le32(tx_flags);
        /* Total # bytes to be transmitted */
        tx_cmd->len = cpu_to_le16((u16)skb->len +
-               (uintptr_t)info->driver_data[0]);
+               (uintptr_t)skb_info->driver_data[0]);
        tx_cmd->next_frame_len = 0;
        tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
        tx_cmd->sta_id = sta_id;
@@ -327,10 +328,11 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
  */
 static struct iwl_device_cmd *
 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
-                     int hdrlen, struct ieee80211_sta *sta, u8 sta_id)
+                     struct ieee80211_tx_info *info, int hdrlen,
+                     struct ieee80211_sta *sta, u8 sta_id)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
 
@@ -350,10 +352,10 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
 
-       memset(&info->status, 0, sizeof(info->status));
-       memset(info->driver_data, 0, sizeof(info->driver_data));
+       memset(&skb_info->status, 0, sizeof(skb_info->status));
+       memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
 
-       info->driver_data[1] = dev_cmd;
+       skb_info->driver_data[1] = dev_cmd;
 
        return dev_cmd;
 }
@@ -361,22 +363,25 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info info;
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
        u8 sta_id;
        int hdrlen = ieee80211_hdrlen(hdr->frame_control);
 
-       if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU))
+       memcpy(&info, skb->cb, sizeof(info));
+
+       if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
                return -1;
 
-       if (WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
-                        (!info->control.vif ||
-                         info->hw_queue != info->control.vif->cab_queue)))
+       if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM &&
+                        (!info.control.vif ||
+                         info.hw_queue != info.control.vif->cab_queue)))
                return -1;
 
        /* This holds the amsdu headers length */
-       info->driver_data[0] = (void *)(uintptr_t)0;
+       skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
        /*
         * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
@@ -385,7 +390,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * and hence needs to be sent on the aux queue
         */
        if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
-           info->control.vif->type == NL80211_IFTYPE_STATION)
+           info.control.vif->type == NL80211_IFTYPE_STATION)
                IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
 
        /*
@@ -398,14 +403,14 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
         * AUX station.
         */
        sta_id = mvm->aux_sta.sta_id;
-       if (info->control.vif) {
+       if (info.control.vif) {
                struct iwl_mvm_vif *mvmvif =
-                       iwl_mvm_vif_from_mac80211(info->control.vif);
+                       iwl_mvm_vif_from_mac80211(info.control.vif);
 
-               if (info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
-                   info->control.vif->type == NL80211_IFTYPE_AP)
+               if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
+                   info.control.vif->type == NL80211_IFTYPE_AP)
                        sta_id = mvmvif->bcast_sta.sta_id;
-               else if (info->control.vif->type == NL80211_IFTYPE_STATION &&
+               else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
                         is_multicast_ether_addr(hdr->addr1)) {
                        u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
 
@@ -414,19 +419,18 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
                }
        }
 
-       IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info->hw_queue);
+       IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, NULL, sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
        if (!dev_cmd)
                return -1;
 
-       /* From now on, we cannot access info->control */
        tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
 
        /* Copy MAC header from skb into command buffer */
        memcpy(tx_cmd->hdr, hdr, hdrlen);
 
-       if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info->hw_queue)) {
+       if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
                iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
                return -1;
        }
@@ -445,11 +449,11 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
 
 #ifdef CONFIG_INET
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff_head *mpdus_skb)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (void *)skb->data;
        unsigned int mss = skb_shinfo(skb)->gso_size;
        struct sk_buff *tmp, *next;
@@ -544,6 +548,8 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
 
        /* This skb fits in one single A-MSDU */
        if (num_subframes * mss >= tcp_payload_len) {
+               struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+
                /*
                 * Compute the length of all the data added for the A-MSDU.
                 * This will be used to compute the length to write in the TX
@@ -552,11 +558,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
                 * already had one set of SNAP / IP / TCP headers.
                 */
                num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-               info = IEEE80211_SKB_CB(skb);
                amsdu_add = num_subframes * sizeof(struct ethhdr) +
                        (num_subframes - 1) * (snap_ip_tcp + pad);
                /* This holds the amsdu headers length */
-               info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+               skb_info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
 
                __skb_queue_tail(mpdus_skb, skb);
                return 0;
@@ -596,11 +601,14 @@ segment:
                        ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
 
                if (tcp_payload_len > mss) {
+                       struct ieee80211_tx_info *skb_info =
+                               IEEE80211_SKB_CB(tmp);
+
                        num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
-                       info = IEEE80211_SKB_CB(tmp);
                        amsdu_add = num_subframes * sizeof(struct ethhdr) +
                                (num_subframes - 1) * (snap_ip_tcp + pad);
-                       info->driver_data[0] = (void *)(uintptr_t)amsdu_add;
+                       skb_info->driver_data[0] =
+                               (void *)(uintptr_t)amsdu_add;
                        skb_shinfo(tmp)->gso_size = mss;
                } else {
                        qc = ieee80211_get_qos_ctl((void *)tmp->data);
@@ -622,6 +630,7 @@ segment:
 }
 #else /* CONFIG_INET */
 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
+                         struct ieee80211_tx_info *info,
                          struct ieee80211_sta *sta,
                          struct sk_buff_head *mpdus_skb)
 {
@@ -636,10 +645,10 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
  * Sets the fields in the Tx cmd that are crypto related
  */
 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
+                          struct ieee80211_tx_info *info,
                           struct ieee80211_sta *sta)
 {
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct iwl_mvm_sta *mvmsta;
        struct iwl_device_cmd *dev_cmd;
        struct iwl_tx_cmd *tx_cmd;
@@ -660,7 +669,8 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
 
-       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, hdrlen, sta, mvmsta->sta_id);
+       dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
+                                       sta, mvmsta->sta_id);
        if (!dev_cmd)
                goto drop;
 
@@ -736,7 +746,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
                   struct ieee80211_sta *sta)
 {
        struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
+       struct ieee80211_tx_info info;
        struct sk_buff_head mpdus_skbs;
        unsigned int payload_len;
        int ret;
@@ -747,21 +758,23 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        if (WARN_ON_ONCE(mvmsta->sta_id == IWL_MVM_STATION_COUNT))
                return -1;
 
+       memcpy(&info, skb->cb, sizeof(info));
+
        /* This holds the amsdu headers length */
-       info->driver_data[0] = (void *)(uintptr_t)0;
+       skb_info->driver_data[0] = (void *)(uintptr_t)0;
 
        if (!skb_is_gso(skb))
-               return iwl_mvm_tx_mpdu(mvm, skb, sta);
+               return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
        payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
                tcp_hdrlen(skb) + skb->data_len;
 
        if (payload_len <= skb_shinfo(skb)->gso_size)
-               return iwl_mvm_tx_mpdu(mvm, skb, sta);
+               return iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
 
        __skb_queue_head_init(&mpdus_skbs);
 
-       ret = iwl_mvm_tx_tso(mvm, skb, sta, &mpdus_skbs);
+       ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
        if (ret)
                return ret;
 
@@ -771,7 +784,7 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
        while (!skb_queue_empty(&mpdus_skbs)) {
                skb = __skb_dequeue(&mpdus_skbs);
 
-               ret = iwl_mvm_tx_mpdu(mvm, skb, sta);
+               ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta);
                if (ret) {
                        __skb_queue_purge(&mpdus_skbs);
                        return ret;
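
The iwl_mvm changes above pull the ieee80211_tx_info out of skb->cb into a stack copy before the cb area is reused for the TX command, and recompute the extra A-MSDU header bytes per TSO segment as num_subframes = DIV_ROUND_UP(tcp_payload_len, mss). Below is a minimal user-space sketch of that overhead arithmetic only; the SNAP/IP/TCP header length and the per-subframe padding are illustrative assumptions, not values taken from the driver.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    #define ETH_HLEN       14   /* sizeof(struct ethhdr) */
    #define SNAP_IP_TCP    66   /* stand-in for snap_ip_tcp, assumption */
    #define PAD            2    /* stand-in for the subframe padding, assumption */

    int main(void)
    {
        unsigned int mss = 1460;             /* gso_size */
        unsigned int tcp_payload_len = 5000; /* example payload, assumption */

        /* Same formula as the hunk above: headers added for every subframe,
         * minus the one SNAP/IP/TCP set the original packet already carries. */
        unsigned int num_subframes = DIV_ROUND_UP(tcp_payload_len, mss);
        unsigned int amsdu_add = num_subframes * ETH_HLEN +
                                 (num_subframes - 1) * (SNAP_IP_TCP + PAD);

        printf("subframes=%u, extra header bytes=%u\n", num_subframes, amsdu_add);
        return 0;
    }
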
index 05b968506836af92426ee275d8efb35dc78c64cb..79d7cd7d461e49afda911356928b253f3c0ab794 100644 (file)
@@ -479,8 +479,18 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
        {IWL_PCI_DEVICE(0x24F3, 0x0930, iwl8260_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24F3, 0x0000, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x1010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0050, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x0150, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9010, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8050, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x8010, iwl8265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x24FD, 0x0810, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x9110, iwl8265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x24FD, 0x8130, iwl8265_2ac_cfg)},
 
 /* 9000 Series */
        {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl5165_2ac_cfg)},
index b42f26029225fd69d9c57fe3088b31fdb5ab39bb..4412a57ec862287b1acad6ed486b7d7b2bfe1c1c 100644 (file)
@@ -711,6 +711,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
                if (cons == end)
                        break;
                RING_COPY_REQUEST(&queue->tx, cons++, txp);
+               extra_count = 0; /* only the first frag can have extras */
        } while (1);
        queue->tx.req_cons = cons;
 }
index f798899338eddb6955c06f5b44c26c405dffdade..92f536596b24295f0b19aea551844eecb95f7df6 100644 (file)
@@ -397,10 +397,17 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
         */
        start += start_pad;
        npfns = (pmem->size - start_pad - end_trunc - SZ_8K) / SZ_4K;
-       if (nd_pfn->mode == PFN_MODE_PMEM)
-               offset = ALIGN(start + SZ_8K + 64 * npfns, nd_pfn->align)
+       if (nd_pfn->mode == PFN_MODE_PMEM) {
+               unsigned long memmap_size;
+
+               /*
+                * vmemmap_populate_hugepages() allocates the memmap array in
+                * PMD_SIZE chunks.
+                */
+               memmap_size = ALIGN(64 * npfns, PMD_SIZE);
+               offset = ALIGN(start + SZ_8K + memmap_size, nd_pfn->align)
                        - start;
-       else if (nd_pfn->mode == PFN_MODE_RAM)
+       } else if (nd_pfn->mode == PFN_MODE_RAM)
                offset = ALIGN(start + SZ_8K, nd_pfn->align) - start;
        else
                goto err;
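
The libnvdimm hunk above sizes the reserved memmap in whole PMD_SIZE chunks before aligning the data offset. A standalone sketch of the same ALIGN arithmetic follows; the 16 GiB namespace size, the 2 MiB PMD size (typical for x86-64) and the 2 MiB nd_pfn->align are assumptions chosen for illustration, and the 64 bytes per page mirrors the 64 * npfns term in the hunk.

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
    #define SZ_4K         4096ULL
    #define SZ_8K         8192ULL
    #define SZ_2M         (2ULL * 1024 * 1024)   /* PMD_SIZE on x86-64, assumption */

    int main(void)
    {
        uint64_t size = 16ULL * 1024 * 1024 * 1024;  /* 16 GiB namespace, assumption */
        uint64_t start = 0, start_pad = 0, end_trunc = 0;
        uint64_t align = SZ_2M;                      /* nd_pfn->align, assumption */

        uint64_t npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
        /* Round the 64-byte-per-page memmap up to whole PMDs, since
         * vmemmap_populate_hugepages() hands it out in PMD_SIZE chunks. */
        uint64_t memmap_size = ALIGN(64 * npfns, SZ_2M);
        uint64_t offset = ALIGN(start + SZ_8K + memmap_size, align) - start;

        printf("npfns=%llu memmap=%llu bytes offset=%llu bytes\n",
               (unsigned long long)npfns, (unsigned long long)memmap_size,
               (unsigned long long)offset);
        return 0;
    }
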
index 8ba19bba31569f22c6529a739fa1d30adf5c2f0f..2bb3c5799ac4b0146b2760d8823fb1eb28a6bdaa 100644 (file)
@@ -94,7 +94,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
        if (ret)
                goto close_banks;
 
-       while (val_size) {
+       while (val_size >= reg_size) {
                if ((offset < OCOTP_DATA_OFFSET) || (offset % 16)) {
                        /* fill up non-data register */
                        *buf = 0;
@@ -103,7 +103,7 @@ static int mxs_ocotp_read(void *context, const void *reg, size_t reg_size,
                }
 
                buf++;
-               val_size--;
+               val_size -= reg_size;
                offset += reg_size;
        }
 
index 6c9f5467bc5f84e65fbe6e55d0e03693d260e535..dd7cdbee8029d5a51a60c63bb24a5772c0b84de9 100644 (file)
@@ -294,7 +294,7 @@ void pci_bus_add_device(struct pci_dev *dev)
 
        dev->match_driver = true;
        retval = device_attach(&dev->dev);
-       if (retval < 0) {
+       if (retval < 0 && retval != -EPROBE_DEFER) {
                dev_warn(&dev->dev, "device attach failed (%d)\n", retval);
                pci_proc_detach_device(dev);
                pci_remove_sysfs_dev_files(dev);
@@ -324,7 +324,9 @@ void pci_bus_add_devices(const struct pci_bus *bus)
        }
 
        list_for_each_entry(dev, &bus->devices, bus_list) {
-               BUG_ON(!dev->is_added);
+               /* Skip if device attach failed */
+               if (!dev->is_added)
+                       continue;
                child = dev->subordinate;
                if (child)
                        pci_bus_add_devices(child);
index 4429312e848dba2af05ddb3d9da6cb3b2b176248..2c447130b954fa15421858a03dfe7cdf887de80c 100644 (file)
@@ -722,9 +722,11 @@ static int atmel_conf_pin_config_group_set(struct pinctrl_dev *pctldev,
                        break;
                case PIN_CONFIG_BIAS_PULL_UP:
                        conf |= ATMEL_PIO_PUEN_MASK;
+                       conf &= (~ATMEL_PIO_PDEN_MASK);
                        break;
                case PIN_CONFIG_BIAS_PULL_DOWN:
                        conf |= ATMEL_PIO_PDEN_MASK;
+                       conf &= (~ATMEL_PIO_PUEN_MASK);
                        break;
                case PIN_CONFIG_DRIVE_OPEN_DRAIN:
                        if (arg == 0)
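
The pinctrl-at91-pio4 change above makes the pull-up and pull-down biases mutually exclusive: enabling one now clears the other's enable bit in the same configuration word. A tiny sketch of that bit handling; the two mask values are stand-ins, not the driver's real register layout.

    #include <stdio.h>
    #include <stdint.h>

    #define PUEN_MASK  (1u << 9)    /* stand-in for ATMEL_PIO_PUEN_MASK */
    #define PDEN_MASK  (1u << 10)   /* stand-in for ATMEL_PIO_PDEN_MASK */

    static uint32_t set_pull_up(uint32_t conf)
    {
        conf |= PUEN_MASK;
        conf &= ~PDEN_MASK;   /* never leave both biases enabled */
        return conf;
    }

    static uint32_t set_pull_down(uint32_t conf)
    {
        conf |= PDEN_MASK;
        conf &= ~PUEN_MASK;
        return conf;
    }

    int main(void)
    {
        uint32_t conf = set_pull_down(0);   /* pin previously biased pull-down */

        conf = set_pull_up(conf);
        printf("PUEN=%d PDEN=%d\n", !!(conf & PUEN_MASK), !!(conf & PDEN_MASK));
        return 0;
    }
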
index 96168b8190449ccc1bee6e24ef0758af1bb3c69c..e165b7ce29d7dda18c6d84306a73cdba28818b6c 100644 (file)
@@ -126,7 +126,7 @@ struct rio_mport_mapping {
        struct list_head node;
        struct mport_dev *md;
        enum rio_mport_map_dir dir;
-       u32 rioid;
+       u16 rioid;
        u64 rio_addr;
        dma_addr_t phys_addr; /* for mmap */
        void *virt_addr; /* kernel address, for dma_free_coherent */
@@ -137,7 +137,7 @@ struct rio_mport_mapping {
 
 struct rio_mport_dma_map {
        int valid;
-       uint64_t length;
+       u64 length;
        void *vaddr;
        dma_addr_t paddr;
 };
@@ -208,7 +208,7 @@ struct mport_cdev_priv {
        struct kfifo            event_fifo;
        wait_queue_head_t       event_rx_wait;
        spinlock_t              fifo_lock;
-       unsigned int            event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
+       u32                     event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
        struct dma_chan         *dmach;
        struct list_head        async_list;
@@ -276,7 +276,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                return -EFAULT;
 
        if ((maint_io.offset % 4) ||
-           (maint_io.length == 0) || (maint_io.length % 4))
+           (maint_io.length == 0) || (maint_io.length % 4) ||
+           (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;
 
        buffer = vmalloc(maint_io.length);
@@ -298,7 +299,8 @@ static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
                offset += 4;
        }
 
-       if (unlikely(copy_to_user(maint_io.buffer, buffer, maint_io.length)))
+       if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
+                                  buffer, maint_io.length)))
                ret = -EFAULT;
 out:
        vfree(buffer);
@@ -319,7 +321,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                return -EFAULT;
 
        if ((maint_io.offset % 4) ||
-           (maint_io.length == 0) || (maint_io.length % 4))
+           (maint_io.length == 0) || (maint_io.length % 4) ||
+           (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
                return -EINVAL;
 
        buffer = vmalloc(maint_io.length);
@@ -327,7 +330,8 @@ static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
                return -ENOMEM;
        length = maint_io.length;
 
-       if (unlikely(copy_from_user(buffer, maint_io.buffer, length))) {
+       if (unlikely(copy_from_user(buffer,
+                       (void __user *)(uintptr_t)maint_io.buffer, length))) {
                ret = -EFAULT;
                goto out;
        }
@@ -360,7 +364,7 @@ out:
  */
 static int
 rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
-                                 u32 rioid, u64 raddr, u32 size,
+                                 u16 rioid, u64 raddr, u32 size,
                                  dma_addr_t *paddr)
 {
        struct rio_mport *mport = md->mport;
@@ -369,7 +373,7 @@ rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
 
        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -394,7 +398,7 @@ err_map_outb:
 
 static int
 rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
-                              u32 rioid, u64 raddr, u32 size,
+                              u16 rioid, u64 raddr, u32 size,
                               dma_addr_t *paddr)
 {
        struct rio_mport_mapping *map;
@@ -433,7 +437,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
        dma_addr_t paddr;
        int ret;
 
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
@@ -448,7 +452,7 @@ static int rio_mport_obw_map(struct file *filp, void __user *arg)
 
        map.handle = paddr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap))))
+       if (unlikely(copy_to_user(arg, &map, sizeof(map))))
                return -EFAULT;
        return 0;
 }
@@ -469,7 +473,7 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
        if (!md->mport->ops->unmap_outb)
                return -EPROTONOSUPPORT;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
 
        rmcd_debug(OBW, "h=0x%llx", handle);
@@ -498,9 +502,9 @@ static int rio_mport_obw_free(struct file *filp, void __user *arg)
 static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint16_t hdid;
+       u16 hdid;
 
-       if (copy_from_user(&hdid, arg, sizeof(uint16_t)))
+       if (copy_from_user(&hdid, arg, sizeof(hdid)))
                return -EFAULT;
 
        md->mport->host_deviceid = hdid;
@@ -520,9 +524,9 @@ static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
 static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint32_t comptag;
+       u32 comptag;
 
-       if (copy_from_user(&comptag, arg, sizeof(uint32_t)))
+       if (copy_from_user(&comptag, arg, sizeof(comptag)))
                return -EFAULT;
 
        rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
@@ -837,7 +841,7 @@ err_out:
  * @xfer: data transfer descriptor structure
  */
 static int
-rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
+rio_dma_transfer(struct file *filp, u32 transfer_mode,
                 enum rio_transfer_sync sync, enum dma_data_direction dir,
                 struct rio_transfer_io *xfer)
 {
@@ -875,7 +879,7 @@ rio_dma_transfer(struct file *filp, uint32_t transfer_mode,
                unsigned long offset;
                long pinned;
 
-               offset = (unsigned long)xfer->loc_addr & ~PAGE_MASK;
+               offset = (unsigned long)(uintptr_t)xfer->loc_addr & ~PAGE_MASK;
                nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
 
                page_list = kmalloc_array(nr_pages,
@@ -1015,19 +1019,20 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
        if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
                return -EFAULT;
 
-       if (transaction.count != 1)
+       if (transaction.count != 1) /* only single transfer for now */
                return -EINVAL;
 
        if ((transaction.transfer_mode &
             priv->md->properties.transfer_mode) == 0)
                return -ENODEV;
 
-       transfer = vmalloc(transaction.count * sizeof(struct rio_transfer_io));
+       transfer = vmalloc(transaction.count * sizeof(*transfer));
        if (!transfer)
                return -ENOMEM;
 
-       if (unlikely(copy_from_user(transfer, transaction.block,
-             transaction.count * sizeof(struct rio_transfer_io)))) {
+       if (unlikely(copy_from_user(transfer,
+                                   (void __user *)(uintptr_t)transaction.block,
+                                   transaction.count * sizeof(*transfer)))) {
                ret = -EFAULT;
                goto out_free;
        }
@@ -1038,8 +1043,9 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
                ret = rio_dma_transfer(filp, transaction.transfer_mode,
                        transaction.sync, dir, &transfer[i]);
 
-       if (unlikely(copy_to_user(transaction.block, transfer,
-             transaction.count * sizeof(struct rio_transfer_io))))
+       if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
+                                 transfer,
+                                 transaction.count * sizeof(*transfer))))
                ret = -EFAULT;
 
 out_free:
@@ -1129,11 +1135,11 @@ err_tmo:
 }
 
 static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
-                       uint64_t size, struct rio_mport_mapping **mapping)
+                       u64 size, struct rio_mport_mapping **mapping)
 {
        struct rio_mport_mapping *map;
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -1165,7 +1171,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
        struct rio_mport_mapping *mapping = NULL;
        int ret;
 
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_dma_mem))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
@@ -1174,7 +1180,7 @@ static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
 
        map.dma_handle = mapping->phys_addr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_dma_mem)))) {
+       if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                mutex_lock(&md->buf_mutex);
                kref_put(&mapping->ref, mport_release_mapping);
                mutex_unlock(&md->buf_mutex);
@@ -1192,7 +1198,7 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
        int ret = -EFAULT;
        struct rio_mport_mapping *map, *_map;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
        rmcd_debug(EXIT, "filp=%p", filp);
 
@@ -1242,14 +1248,18 @@ static int rio_mport_free_dma(struct file *filp, void __user *arg)
 
 static int
 rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
-                               u64 raddr, u32 size,
+                               u64 raddr, u64 size,
                                struct rio_mport_mapping **mapping)
 {
        struct rio_mport *mport = md->mport;
        struct rio_mport_mapping *map;
        int ret;
 
-       map = kzalloc(sizeof(struct rio_mport_mapping), GFP_KERNEL);
+       /* rio_map_inb_region() accepts u32 size */
+       if (size > 0xffffffff)
+               return -EINVAL;
+
+       map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (map == NULL)
                return -ENOMEM;
 
@@ -1262,7 +1272,7 @@ rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
 
        if (raddr == RIO_MAP_ANY_ADDR)
                raddr = map->phys_addr;
-       ret = rio_map_inb_region(mport, map->phys_addr, raddr, size, 0);
+       ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
        if (ret < 0)
                goto err_map_inb;
 
@@ -1288,7 +1298,7 @@ err_dma_alloc:
 
 static int
 rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
-                             u64 raddr, u32 size,
+                             u64 raddr, u64 size,
                              struct rio_mport_mapping **mapping)
 {
        struct rio_mport_mapping *map;
@@ -1331,7 +1341,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
 
        if (!md->mport->ops->map_inb)
                return -EPROTONOSUPPORT;
-       if (unlikely(copy_from_user(&map, arg, sizeof(struct rio_mmap))))
+       if (unlikely(copy_from_user(&map, arg, sizeof(map))))
                return -EFAULT;
 
        rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
@@ -1344,7 +1354,7 @@ static int rio_mport_map_inbound(struct file *filp, void __user *arg)
        map.handle = mapping->phys_addr;
        map.rio_addr = mapping->rio_addr;
 
-       if (unlikely(copy_to_user(arg, &map, sizeof(struct rio_mmap)))) {
+       if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
                /* Delete mapping if it was created by this request */
                if (ret == 0 && mapping->filp == filp) {
                        mutex_lock(&md->buf_mutex);
@@ -1375,7 +1385,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
        if (!md->mport->ops->unmap_inb)
                return -EPROTONOSUPPORT;
 
-       if (copy_from_user(&handle, arg, sizeof(u64)))
+       if (copy_from_user(&handle, arg, sizeof(handle)))
                return -EFAULT;
 
        mutex_lock(&md->buf_mutex);
@@ -1401,7 +1411,7 @@ static int rio_mport_inbound_free(struct file *filp, void __user *arg)
 static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
 {
        struct mport_dev *md = priv->md;
-       uint32_t port_idx = md->mport->index;
+       u32 port_idx = md->mport->index;
 
        rmcd_debug(MPORT, "port_index=%d", port_idx);
 
@@ -1451,7 +1461,7 @@ static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
        handled = 0;
        spin_lock(&data->db_lock);
        list_for_each_entry(db_filter, &data->doorbells, data_node) {
-               if (((db_filter->filter.rioid == 0xffffffff ||
+               if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
                      db_filter->filter.rioid == src)) &&
                      info >= db_filter->filter.low &&
                      info <= db_filter->filter.high) {
@@ -1525,6 +1535,9 @@ static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
        if (copy_from_user(&filter, arg, sizeof(filter)))
                return -EFAULT;
 
+       if (filter.low > filter.high)
+               return -EINVAL;
+
        spin_lock_irqsave(&priv->md->db_lock, flags);
        list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
                if (db_filter->filter.rioid == filter.rioid &&
@@ -1737,10 +1750,10 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
                return -EEXIST;
        }
 
-       size = sizeof(struct rio_dev);
+       size = sizeof(*rdev);
        mport = md->mport;
-       destid = (u16)dev_info.destid;
-       hopcount = (u8)dev_info.hopcount;
+       destid = dev_info.destid;
+       hopcount = dev_info.hopcount;
 
        if (rio_mport_read_config_32(mport, destid, hopcount,
                                     RIO_PEF_CAR, &rval))
@@ -1872,8 +1885,8 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
                do {
                        rdev = rio_get_comptag(dev_info.comptag, rdev);
                        if (rdev && rdev->dev.parent == &mport->net->dev &&
-                           rdev->destid == (u16)dev_info.destid &&
-                           rdev->hopcount == (u8)dev_info.hopcount)
+                           rdev->destid == dev_info.destid &&
+                           rdev->hopcount == dev_info.hopcount)
                                break;
                } while (rdev);
        }
@@ -2146,8 +2159,8 @@ static long mport_cdev_ioctl(struct file *filp,
                return maint_port_idx_get(data, (void __user *)arg);
        case RIO_MPORT_GET_PROPERTIES:
                md->properties.hdid = md->mport->host_deviceid;
-               if (copy_to_user((void __user *)arg, &(data->md->properties),
-                                sizeof(data->md->properties)))
+               if (copy_to_user((void __user *)arg, &(md->properties),
+                                sizeof(md->properties)))
                        return -EFAULT;
                return 0;
        case RIO_ENABLE_DOORBELL_RANGE:
@@ -2159,11 +2172,11 @@ static long mport_cdev_ioctl(struct file *filp,
        case RIO_DISABLE_PORTWRITE_RANGE:
                return rio_mport_remove_pw_filter(data, (void __user *)arg);
        case RIO_SET_EVENT_MASK:
-               data->event_mask = arg;
+               data->event_mask = (u32)arg;
                return 0;
        case RIO_GET_EVENT_MASK:
                if (copy_to_user((void __user *)arg, &data->event_mask,
-                                   sizeof(data->event_mask)))
+                                   sizeof(u32)))
                        return -EFAULT;
                return 0;
        case RIO_MAP_OUTBOUND:
@@ -2374,7 +2387,7 @@ static ssize_t mport_write(struct file *filp, const char __user *buf,
                        return -EINVAL;
 
                ret = rio_mport_send_doorbell(mport,
-                                             (u16)event.u.doorbell.rioid,
+                                             event.u.doorbell.rioid,
                                              event.u.doorbell.payload);
                if (ret < 0)
                        return ret;
@@ -2421,7 +2434,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
        struct mport_dev *md;
        struct rio_mport_attr attr;
 
-       md = kzalloc(sizeof(struct mport_dev), GFP_KERNEL);
+       md = kzalloc(sizeof(*md), GFP_KERNEL);
        if (!md) {
                rmcd_error("Unable allocate a device object");
                return NULL;
@@ -2470,7 +2483,7 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
        /* The transfer_mode property will be returned through mport query
         * interface
         */
-#ifdef CONFIG_PPC /* for now: only on Freescale's SoCs */
+#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
        md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
 #else
        md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
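
Among the mport_cdev fixes above, the maintenance read/write ioctls now reject requests whose offset or length is not word aligned, or whose end would run past the maintenance space. A user-space sketch of that validation; the 1 MiB maintenance-space size below only stands in for RIO_MAINT_SPACE_SZ and is an assumption.

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define MAINT_SPACE_SZ  (1u << 20)   /* stand-in for RIO_MAINT_SPACE_SZ */

    /* Mirrors the checks added to rio_mport_maint_rd()/_wr() above. */
    static int maint_io_check(uint32_t offset, uint32_t length)
    {
        if ((offset % 4) || (length == 0) || (length % 4) ||
            (offset + length) > MAINT_SPACE_SZ)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        printf("aligned, in range : %d\n", maint_io_check(0x100, 0x40));
        printf("unaligned offset  : %d\n", maint_io_check(0x102, 0x40));
        printf("past maint space  : %d\n", maint_io_check(MAINT_SPACE_SZ - 4, 8));
        return 0;
    }
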
index 40cd894e4df5e3d11f26442692dd9c0076d932af..514a5e8fdbab3e757b470daa6e579ebe489fa0f7 100644 (file)
@@ -157,7 +157,9 @@ static struct regulator_ops axp20x_ops_sw = {
 static const struct regulator_linear_range axp20x_ldo4_ranges[] = {
        REGULATOR_LINEAR_RANGE(1250000, 0x0, 0x0, 0),
        REGULATOR_LINEAR_RANGE(1300000, 0x1, 0x8, 100000),
-       REGULATOR_LINEAR_RANGE(2500000, 0x9, 0xf, 100000),
+       REGULATOR_LINEAR_RANGE(2500000, 0x9, 0x9, 0),
+       REGULATOR_LINEAR_RANGE(2700000, 0xa, 0xb, 100000),
+       REGULATOR_LINEAR_RANGE(3000000, 0xc, 0xf, 100000),
 };
 
 static const struct regulator_desc axp20x_regulators[] = {
@@ -215,10 +217,14 @@ static const struct regulator_desc axp22x_regulators[] = {
                 AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)),
        AXP_DESC(AXP22X, ELDO3, "eldo3", "eldoin", 700, 3300, 100,
                 AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)),
-       AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 1800, 3300, 100,
+       /* Note the datasheet only guarantees reliable operation up to
+        * 3.3V, this needs to be enforced via dts provided constraints */
+       AXP_DESC_IO(AXP22X, LDO_IO0, "ldo_io0", "ips", 700, 3800, 100,
                    AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07,
                    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
-       AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 1800, 3300, 100,
+       /* Note the datasheet only guarantees reliable operation up to
+        * 3.3V, this needs to be enforced via dts provided constraints */
+       AXP_DESC_IO(AXP22X, LDO_IO1, "ldo_io1", "ips", 700, 3800, 100,
                    AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07,
                    AXP22X_IO_ENABLED, AXP22X_IO_DISABLED),
        AXP_DESC_FIXED(AXP22X, RTC_LDO, "rtc_ldo", "ips", 3000),
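
The axp20x hunk above splits LDO4's single linear range into several because the regulator core maps a selector to min_uV + (sel - min_sel) * uV_step within its range. A sketch of that mapping using exactly the ranges listed in the patch: with the removed single 2.5 V range, selector 0xf worked out to 3.1 V, while the split ranges yield 3.3 V.

    #include <stdio.h>

    struct linear_range { unsigned int min_uV, min_sel, max_sel, step_uV; };

    /* LDO4 ranges exactly as listed in the hunk above. */
    static const struct linear_range ldo4[] = {
        { 1250000, 0x0, 0x0, 0 },
        { 1300000, 0x1, 0x8, 100000 },
        { 2500000, 0x9, 0x9, 0 },
        { 2700000, 0xa, 0xb, 100000 },
        { 3000000, 0xc, 0xf, 100000 },
    };

    static int sel_to_uV(unsigned int sel)
    {
        unsigned int i;

        for (i = 0; i < sizeof(ldo4) / sizeof(ldo4[0]); i++)
            if (sel >= ldo4[i].min_sel && sel <= ldo4[i].max_sel)
                return ldo4[i].min_uV +
                       (sel - ldo4[i].min_sel) * ldo4[i].step_uV;
        return -1;
    }

    int main(void)
    {
        printf("sel 0x9 -> %d uV\n", sel_to_uV(0x9));   /* 2500000 */
        printf("sel 0xf -> %d uV\n", sel_to_uV(0xf));   /* 3300000 */
        return 0;
    }
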
index ed9e7e96f8777a291341e1dd45fccc413bee0f43..c6af343f54eac5c59b3e06923e3b4141413b833e 100644 (file)
@@ -900,4 +900,4 @@ module_exit(da9063_regulator_cleanup);
 MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>");
 MODULE_DESCRIPTION("DA9063 regulators driver");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("paltform:" DA9063_DRVNAME_REGULATORS);
+MODULE_ALIAS("platform:" DA9063_DRVNAME_REGULATORS);
index a8718e98674a273939f20d06627be943aab6aee7..83e89e5d47526c17be6d728fcc7a651f45aec4b0 100644 (file)
@@ -162,6 +162,8 @@ of_get_gpio_regulator_config(struct device *dev, struct device_node *np,
        of_property_read_u32(np, "startup-delay-us", &config->startup_delay);
 
        config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);
+       if (config->enable_gpio == -EPROBE_DEFER)
+               return ERR_PTR(-EPROBE_DEFER);
 
        /* Fetch GPIOs. - optional property*/
        ret = of_gpio_count(np);
index d24e2c783dc5c9a093220c4e98980813ad9e309f..6dfa3502e1f1a5ed7532d15157856c30e8d3bb7c 100644 (file)
@@ -308,7 +308,7 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
-#define regulator_desc_s2mps11_buck6_10(num, min, step) {      \
+#define regulator_desc_s2mps11_buck67810(num, min, step) {     \
        .name           = "BUCK"#num,                           \
        .id             = S2MPS11_BUCK##num,                    \
        .ops            = &s2mps11_buck_ops,                    \
@@ -324,6 +324,22 @@ static struct regulator_ops s2mps11_buck_ops = {
        .enable_mask    = S2MPS11_ENABLE_MASK                   \
 }
 
+#define regulator_desc_s2mps11_buck9 {                         \
+       .name           = "BUCK9",                              \
+       .id             = S2MPS11_BUCK9,                        \
+       .ops            = &s2mps11_buck_ops,                    \
+       .type           = REGULATOR_VOLTAGE,                    \
+       .owner          = THIS_MODULE,                          \
+       .min_uV         = MIN_3000_MV,                          \
+       .uV_step        = STEP_25_MV,                           \
+       .n_voltages     = S2MPS11_BUCK9_N_VOLTAGES,             \
+       .ramp_delay     = S2MPS11_RAMP_DELAY,                   \
+       .vsel_reg       = S2MPS11_REG_B9CTRL2,                  \
+       .vsel_mask      = S2MPS11_BUCK9_VSEL_MASK,              \
+       .enable_reg     = S2MPS11_REG_B9CTRL1,                  \
+       .enable_mask    = S2MPS11_ENABLE_MASK                   \
+}
+
 static const struct regulator_desc s2mps11_regulators[] = {
        regulator_desc_s2mps11_ldo(1, STEP_25_MV),
        regulator_desc_s2mps11_ldo(2, STEP_50_MV),
@@ -368,11 +384,11 @@ static const struct regulator_desc s2mps11_regulators[] = {
        regulator_desc_s2mps11_buck1_4(3),
        regulator_desc_s2mps11_buck1_4(4),
        regulator_desc_s2mps11_buck5,
-       regulator_desc_s2mps11_buck6_10(6, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck6_10(7, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck6_10(8, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps11_buck6_10(9, MIN_3000_MV, STEP_25_MV),
-       regulator_desc_s2mps11_buck6_10(10, MIN_750_MV, STEP_12_5_MV),
+       regulator_desc_s2mps11_buck67810(6, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps11_buck67810(7, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps11_buck67810(8, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps11_buck9,
+       regulator_desc_s2mps11_buck67810(10, MIN_750_MV, STEP_12_5_MV),
 };
 
 static struct regulator_ops s2mps14_reg_ops;
index 8eaed0522aa36e83ddbe259a4fe2293c5ac61579..a655cf29c16f7d40367a859fb11f483e28252219 100644 (file)
@@ -532,6 +532,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
                return SCSI_DH_DEV_TEMP_BUSY;
 
  retry:
+       err = 0;
        retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);
 
        if (retval) {
index 5d0ec42a9317d6a08a2b99fd424339c4554f4787..634254a523013a557327a7b38825924de7e71b6c 100644 (file)
@@ -4214,7 +4214,7 @@ static struct scsi_host_template qla1280_driver_template = {
        .eh_bus_reset_handler   = qla1280_eh_bus_reset,
        .eh_host_reset_handler  = qla1280_eh_adapter_reset,
        .bios_param             = qla1280_biosparam,
-       .can_queue              = 0xfffff,
+       .can_queue              = MAX_OUTSTANDING_COMMANDS,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .use_clustering         = ENABLE_CLUSTERING,
index 39412c9097c6a240466c51c941ec890a4612542e..c1a2d747b24686cb835541c68bb2f20f2304d009 100644 (file)
@@ -385,8 +385,8 @@ static int dspi_transfer_one_message(struct spi_master *master,
                dspi->cur_chip = spi_get_ctldata(spi);
                dspi->cs = spi->chip_select;
                dspi->cs_change = 0;
-               if (dspi->cur_transfer->transfer_list.next
-                               == &dspi->cur_msg->transfers)
+               if (list_is_last(&dspi->cur_transfer->transfer_list,
+                                &dspi->cur_msg->transfers) || transfer->cs_change)
                        dspi->cs_change = 1;
                dspi->void_write_data = dspi->cur_chip->void_write_data;
 
index 43a02e377b3b168339013d797f8da7c150959c58..0caa3c8bef46c46e0ed66bf89f518cc5c5236449 100644 (file)
@@ -423,12 +423,16 @@ static void omap2_mcspi_tx_dma(struct spi_device *spi,
 
        if (mcspi_dma->dma_tx) {
                struct dma_async_tx_descriptor *tx;
+               struct scatterlist sg;
 
                dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
 
-               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, xfer->tx_sg.sgl,
-                                            xfer->tx_sg.nents, DMA_MEM_TO_DEV,
-                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               sg_init_table(&sg, 1);
+               sg_dma_address(&sg) = xfer->tx_dma;
+               sg_dma_len(&sg) = xfer->len;
+
+               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
+               DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_tx_callback;
                        tx->callback_param = spi;
@@ -474,15 +478,20 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
 
        if (mcspi_dma->dma_rx) {
                struct dma_async_tx_descriptor *tx;
+               struct scatterlist sg;
 
                dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
 
                if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
                        dma_count -= es;
 
-               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, xfer->rx_sg.sgl,
-                                            xfer->rx_sg.nents, DMA_DEV_TO_MEM,
-                                            DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               sg_init_table(&sg, 1);
+               sg_dma_address(&sg) = xfer->rx_dma;
+               sg_dma_len(&sg) = dma_count;
+
+               tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
+                               DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
+                               DMA_CTRL_ACK);
                if (tx) {
                        tx->callback = omap2_mcspi_rx_callback;
                        tx->callback_param = spi;
@@ -496,6 +505,8 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
        omap2_mcspi_set_dma_req(spi, 1, 1);
 
        wait_for_completion(&mcspi_dma->dma_rx_completion);
+       dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
+                        DMA_FROM_DEVICE);
 
        if (mcspi->fifo_depth > 0)
                return count;
@@ -608,6 +619,8 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
 
        if (tx != NULL) {
                wait_for_completion(&mcspi_dma->dma_tx_completion);
+               dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
+                                DMA_TO_DEVICE);
 
                if (mcspi->fifo_depth > 0) {
                        irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@@ -1074,16 +1087,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
                gpio_free(spi->cs_gpio);
 }
 
-static bool omap2_mcspi_can_dma(struct spi_master *master,
-                               struct spi_device *spi,
-                               struct spi_transfer *xfer)
-{
-       if (xfer->len < DMA_MIN_BYTES)
-               return false;
-
-       return true;
-}
-
 static int omap2_mcspi_work_one(struct omap2_mcspi *mcspi,
                struct spi_device *spi, struct spi_transfer *t)
 {
@@ -1265,6 +1268,32 @@ static int omap2_mcspi_transfer_one(struct spi_master *master,
                return -EINVAL;
        }
 
+       if (len < DMA_MIN_BYTES)
+               goto skip_dma_map;
+
+       if (mcspi_dma->dma_tx && tx_buf != NULL) {
+               t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
+                               len, DMA_TO_DEVICE);
+               if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
+                       dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+                                       'T', len);
+                       return -EINVAL;
+               }
+       }
+       if (mcspi_dma->dma_rx && rx_buf != NULL) {
+               t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
+                               DMA_FROM_DEVICE);
+               if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
+                       dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+                                       'R', len);
+                       if (tx_buf != NULL)
+                               dma_unmap_single(mcspi->dev, t->tx_dma,
+                                               len, DMA_TO_DEVICE);
+                       return -EINVAL;
+               }
+       }
+
+skip_dma_map:
        return omap2_mcspi_work_one(mcspi, spi, t);
 }
 
@@ -1348,7 +1377,6 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
        master->transfer_one = omap2_mcspi_transfer_one;
        master->set_cs = omap2_mcspi_set_cs;
        master->cleanup = omap2_mcspi_cleanup;
-       master->can_dma = omap2_mcspi_can_dma;
        master->dev.of_node = node;
        master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
        master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
index 85e59a406a4c01fabef55679c5ffb9b9583bd852..86138e4101b07f990634d06af60b4c5a63ce4607 100644 (file)
@@ -126,7 +126,7 @@ static const struct lpss_config lpss_platforms[] = {
                .reg_general = -1,
                .reg_ssp = 0x20,
                .reg_cs_ctrl = 0x24,
-               .reg_capabilities = 0xfc,
+               .reg_capabilities = -1,
                .rx_threshold = 1,
                .tx_threshold_lo = 32,
                .tx_threshold_hi = 56,
index eac3c960b2decb8c4aa73f73894b16ac3021beba..443f664534e144fd388e2baba7f4d9bb49134772 100644 (file)
@@ -94,6 +94,7 @@ struct ti_qspi {
 #define QSPI_FLEN(n)                   ((n - 1) << 0)
 #define QSPI_WLEN_MAX_BITS             128
 #define QSPI_WLEN_MAX_BYTES            16
+#define QSPI_WLEN_MASK                 QSPI_WLEN(QSPI_WLEN_MAX_BITS)
 
 /* STATUS REGISTER */
 #define BUSY                           0x01
@@ -235,16 +236,16 @@ static inline int ti_qspi_poll_wc(struct ti_qspi *qspi)
        return  -ETIMEDOUT;
 }
 
-static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+                         int count)
 {
-       int wlen, count, xfer_len;
+       int wlen, xfer_len;
        unsigned int cmd;
        const u8 *txbuf;
        u32 data;
 
        txbuf = t->tx_buf;
        cmd = qspi->cmd | QSPI_WR_SNGL;
-       count = t->len;
        wlen = t->bits_per_word >> 3;   /* in bytes */
        xfer_len = wlen;
 
@@ -304,9 +305,10 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        return 0;
 }
 
-static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+                        int count)
 {
-       int wlen, count;
+       int wlen;
        unsigned int cmd;
        u8 *rxbuf;
 
@@ -323,7 +325,6 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
                cmd |= QSPI_RD_SNGL;
                break;
        }
-       count = t->len;
        wlen = t->bits_per_word >> 3;   /* in bytes */
 
        while (count) {
@@ -354,12 +355,13 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        return 0;
 }
 
-static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t,
+                            int count)
 {
        int ret;
 
        if (t->tx_buf) {
-               ret = qspi_write_msg(qspi, t);
+               ret = qspi_write_msg(qspi, t, count);
                if (ret) {
                        dev_dbg(qspi->dev, "Error while writing\n");
                        return ret;
@@ -367,7 +369,7 @@ static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
        }
 
        if (t->rx_buf) {
-               ret = qspi_read_msg(qspi, t);
+               ret = qspi_read_msg(qspi, t, count);
                if (ret) {
                        dev_dbg(qspi->dev, "Error while reading\n");
                        return ret;
@@ -450,7 +452,8 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
        struct spi_device *spi = m->spi;
        struct spi_transfer *t;
        int status = 0, ret;
-       int frame_length;
+       unsigned int frame_len_words, transfer_len_words;
+       int wlen;
 
        /* setup device control reg */
        qspi->dc = 0;
@@ -462,14 +465,15 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
        if (spi->mode & SPI_CS_HIGH)
                qspi->dc |= QSPI_CSPOL(spi->chip_select);
 
-       frame_length = (m->frame_length << 3) / spi->bits_per_word;
-
-       frame_length = clamp(frame_length, 0, QSPI_FRAME);
+       frame_len_words = 0;
+       list_for_each_entry(t, &m->transfers, transfer_list)
+               frame_len_words += t->len / (t->bits_per_word >> 3);
+       frame_len_words = min_t(unsigned int, frame_len_words, QSPI_FRAME);
 
        /* setup command reg */
        qspi->cmd = 0;
        qspi->cmd |= QSPI_EN_CS(spi->chip_select);
-       qspi->cmd |= QSPI_FLEN(frame_length);
+       qspi->cmd |= QSPI_FLEN(frame_len_words);
 
        ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
 
@@ -479,16 +483,23 @@ static int ti_qspi_start_transfer_one(struct spi_master *master,
                ti_qspi_disable_memory_map(spi);
 
        list_for_each_entry(t, &m->transfers, transfer_list) {
-               qspi->cmd |= QSPI_WLEN(t->bits_per_word);
+               qspi->cmd = ((qspi->cmd & ~QSPI_WLEN_MASK) |
+                            QSPI_WLEN(t->bits_per_word));
+
+               wlen = t->bits_per_word >> 3;
+               transfer_len_words = min(t->len / wlen, frame_len_words);
 
-               ret = qspi_transfer_msg(qspi, t);
+               ret = qspi_transfer_msg(qspi, t, transfer_len_words * wlen);
                if (ret) {
                        dev_dbg(qspi->dev, "transfer message failed\n");
                        mutex_unlock(&qspi->list_lock);
                        return -EINVAL;
                }
 
-               m->actual_length += t->len;
+               m->actual_length += transfer_len_words * wlen;
+               frame_len_words -= transfer_len_words;
+               if (frame_len_words == 0)
+                       break;
        }
 
        mutex_unlock(&qspi->list_lock);
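
The ti-qspi rework above derives the frame length from the transfers themselves: it sums each transfer's length in word units, clamps the sum to the controller limit, then consumes that budget transfer by transfer so actual_length only counts what was really sent. A sketch of that accounting over a hypothetical three-transfer message; the 4095-word limit stands in for QSPI_FRAME.

    #include <stdio.h>

    #define QSPI_FRAME  4095u   /* stand-in for the controller's frame limit */

    struct xfer { unsigned int len; unsigned int bits_per_word; };

    int main(void)
    {
        /* Hypothetical message: 1-byte command, 3-byte address,
         * then a 4096-byte bulk read, all in 8-bit words. */
        struct xfer msg[] = { {1, 8}, {3, 8}, {4096, 8} };
        unsigned int i, frame_len_words = 0;

        for (i = 0; i < 3; i++)
            frame_len_words += msg[i].len / (msg[i].bits_per_word >> 3);
        if (frame_len_words > QSPI_FRAME)
            frame_len_words = QSPI_FRAME;

        for (i = 0; i < 3 && frame_len_words; i++) {
            unsigned int wlen = msg[i].bits_per_word >> 3;
            unsigned int words = msg[i].len / wlen;

            if (words > frame_len_words)
                words = frame_len_words;
            printf("transfer %u: %u words (%u bytes)\n", i, words, words * wlen);
            frame_len_words -= words;
        }
        return 0;
    }
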
index 14718a9ffcfb18d398f8ab5ae2cb3df99d4994db..460c855be0d020bd21d3cc024d0a33d705b0d59d 100644 (file)
@@ -249,18 +249,12 @@ static int usb_port_runtime_suspend(struct device *dev)
 
        return retval;
 }
-
-static int usb_port_prepare(struct device *dev)
-{
-       return 1;
-}
 #endif
 
 static const struct dev_pm_ops usb_port_pm_ops = {
 #ifdef CONFIG_PM
        .runtime_suspend =      usb_port_runtime_suspend,
        .runtime_resume =       usb_port_runtime_resume,
-       .prepare =              usb_port_prepare,
 #endif
 };
 
index dcb85e3cd5a76ab40e963f0e6dd402dfb63fa219..479187c32571d7efa9e49dccb3ea7d386102fd13 100644 (file)
@@ -312,13 +312,7 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static int usb_dev_prepare(struct device *dev)
 {
-       struct usb_device *udev = to_usb_device(dev);
-
-       /* Return 0 if the current wakeup setting is wrong, otherwise 1 */
-       if (udev->do_remote_wakeup != device_may_wakeup(dev))
-               return 0;
-
-       return 1;
+       return 0;               /* Implement eventually? */
 }
 
 static void usb_dev_complete(struct device *dev)
index 5e5a8fa005f8befc1ca9eb4814b12afbf8e3ccea..bc8889956d172b293e6771b2cd7e1e604005b2f1 100644 (file)
@@ -83,9 +83,9 @@ static int jz4740_musb_init(struct musb *musb)
 {
        usb_phy_generic_register();
        musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
-       if (!musb->xceiv) {
+       if (IS_ERR(musb->xceiv)) {
                pr_err("HS UDC: no transceiver configured\n");
-               return -ENODEV;
+               return PTR_ERR(musb->xceiv);
        }
 
        /* Silicon does not implement ConfigData register.
index 87bd578799a8a9eb47339177cfc63a5b3bba5a5d..152865b3652295caf52340ca86397d5494f34c84 100644 (file)
@@ -1164,12 +1164,12 @@ static int musb_gadget_disable(struct usb_ep *ep)
                musb_writew(epio, MUSB_RXMAXP, 0);
        }
 
-       musb_ep->desc = NULL;
-       musb_ep->end_point.desc = NULL;
-
        /* abort all pending DMA and requests */
        nuke(musb_ep, -ESHUTDOWN);
 
+       musb_ep->desc = NULL;
+       musb_ep->end_point.desc = NULL;
+
        schedule_work(&musb->irq_work);
 
        spin_unlock_irqrestore(&(musb->lock), flags);
index 58487a4735218518ce50326750ca8718aaa2a516..2f8ad7f1f482cd55116b8cb3a7fd575fc79c360a 100644 (file)
@@ -2735,7 +2735,7 @@ static const struct hc_driver musb_hc_driver = {
        .description            = "musb-hcd",
        .product_desc           = "MUSB HDRC host driver",
        .hcd_priv_size          = sizeof(struct musb *),
-       .flags                  = HCD_USB2 | HCD_MEMORY | HCD_BH,
+       .flags                  = HCD_USB2 | HCD_MEMORY,
 
        /* not using irq handler or reset hooks from usbcore, since
         * those must be shared with peripheral code for OTG configs
index dd47823bb014c4664fed94181b33fbcd5f8d6e85..7c9f25e9c422a278ae71ea37de6a5c367a6e58cf 100644 (file)
@@ -109,6 +109,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
        { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
        { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+       { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
        { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
        { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
        { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -118,6 +119,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */
        { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */
        { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
+       { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
        { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
        { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -141,6 +143,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
        { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
        { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+       { USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
+       { USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
        { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
        { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */
        { USB_DEVICE(0x166A, 0x0201) }, /* Clipsal 5500PACA C-Bus Pascal Automation Controller */
index 983280e8d93f3df4030c737978402ada4deaed10..e5a391aecde1b4c94a71181dacf665f99579bab1 100644 (file)
@@ -761,7 +761,7 @@ config FB_VESA
 
 config FB_EFI
        bool "EFI-based Framebuffer Support"
-       depends on (FB = y) && X86 && EFI
+       depends on (FB = y) && !IA64 && EFI
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
index 95d293b7445a83473bc2131cf657f5de14ef52b3..f4c045c0051cc65fbcdb1df6207f4e3292b8c327 100644 (file)
@@ -6,16 +6,14 @@
  *
  */
 
-#include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/efi.h>
 #include <linux/errno.h>
 #include <linux/fb.h>
 #include <linux/platform_device.h>
 #include <linux/screen_info.h>
-#include <linux/dmi.h>
-#include <linux/pci.h>
 #include <video/vga.h>
-#include <asm/sysfb.h>
+#include <asm/efi.h>
 
 static bool request_mem_succeeded = false;
 
@@ -85,21 +83,13 @@ static struct fb_ops efifb_ops = {
 static int efifb_setup(char *options)
 {
        char *this_opt;
-       int i;
 
        if (options && *options) {
                while ((this_opt = strsep(&options, ",")) != NULL) {
                        if (!*this_opt) continue;
 
-                       for (i = 0; i < M_UNKNOWN; i++) {
-                               if (efifb_dmi_list[i].base != 0 &&
-                                   !strcmp(this_opt, efifb_dmi_list[i].optname)) {
-                                       screen_info.lfb_base = efifb_dmi_list[i].base;
-                                       screen_info.lfb_linelength = efifb_dmi_list[i].stride;
-                                       screen_info.lfb_width = efifb_dmi_list[i].width;
-                                       screen_info.lfb_height = efifb_dmi_list[i].height;
-                               }
-                       }
+                       efifb_setup_from_dmi(&screen_info, this_opt);
+
                        if (!strncmp(this_opt, "base:", 5))
                                screen_info.lfb_base = simple_strtoul(this_opt+5, NULL, 0);
                        else if (!strncmp(this_opt, "stride:", 7))
@@ -338,5 +328,4 @@ static struct platform_driver efifb_driver = {
        .remove = efifb_remove,
 };
 
-module_platform_driver(efifb_driver);
-MODULE_LICENSE("GPL");
+builtin_platform_driver(efifb_driver);
index 5c802d47892c5091c971eaef256310f99b4d175d..ca6bfddaacad2beeb1897a0e397bd5300ccff30d 100644 (file)
@@ -1006,7 +1006,7 @@ struct virtqueue *vring_create_virtqueue(
        const char *name)
 {
        struct virtqueue *vq;
-       void *queue;
+       void *queue = NULL;
        dma_addr_t dma_addr;
        size_t queue_size_in_bytes;
        struct vring vring;
index 9781e0dd59d6fc6a1b02da5e51e516e27ccafad1..d46839f51e730ff50e2c756a9f4b1326af82e2ec 100644 (file)
@@ -151,6 +151,8 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
+static void release_memory_resource(struct resource *resource);
+
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -267,6 +269,20 @@ static struct resource *additional_memory_resource(phys_addr_t size)
                return NULL;
        }
 
+#ifdef CONFIG_SPARSEMEM
+       {
+               unsigned long limit = 1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
+               unsigned long pfn = res->start >> PAGE_SHIFT;
+
+               if (pfn > limit) {
+                       pr_err("New System RAM resource outside addressable RAM (%lu > %lu)\n",
+                              pfn, limit);
+                       release_memory_resource(res);
+                       return NULL;
+               }
+       }
+#endif
+
        return res;
 }
 
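The balloon change above refuses a hotplugged resource whose first PFN lies beyond what SPARSEMEM can address, i.e. beyond 1 << (MAX_PHYSMEM_BITS - PAGE_SHIFT). A sketch of that check with typical x86-64 constants (46 physical address bits, 4 KiB pages) taken as assumptions.

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_PHYSMEM_BITS  46   /* assumption: x86-64 SPARSEMEM default */
    #define PAGE_SHIFT        12

    int main(void)
    {
        uint64_t limit = 1ULL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);
        uint64_t res_start = 1ULL << 47;        /* hypothetical hotplug start */
        uint64_t pfn = res_start >> PAGE_SHIFT;

        if (pfn > limit)
            printf("reject: pfn %llu beyond addressable limit %llu\n",
                   (unsigned long long)pfn, (unsigned long long)limit);
        else
            printf("accept: pfn %llu within limit %llu\n",
                   (unsigned long long)pfn, (unsigned long long)limit);
        return 0;
    }
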
index be7e56a338e84dba95b9e42cddc7db3d15f224a8..e9d2135445c13a493f3193a2c2ccb545a2afbfab 100644 (file)
@@ -316,7 +316,6 @@ static const struct efi efi_xen __initconst = {
        .get_next_high_mono_count = xen_efi_get_next_high_mono_count,
        .reset_system             = NULL, /* Functionality provided by Xen. */
        .set_virtual_address_map  = NULL, /* Not used under Xen. */
-       .memmap                   = NULL, /* Not used under Xen. */
        .flags                    = 0     /* Initialized later. */
 };
 
index 38272ad245516c272ab571bed1a5c962f4a0e808..f4edd6df3df235c55aef1329cb8e17b5ae8cf9f7 100644 (file)
@@ -316,7 +316,6 @@ static int evtchn_resize_ring(struct per_user_data *u)
 {
        unsigned int new_size;
        evtchn_port_t *new_ring, *old_ring;
-       unsigned int p, c;
 
        /*
         * Ensure the ring is large enough to capture all possible
@@ -346,20 +345,17 @@ static int evtchn_resize_ring(struct per_user_data *u)
        /*
         * Copy the old ring contents to the new ring.
         *
-        * If the ring contents crosses the end of the current ring,
-        * it needs to be copied in two chunks.
+        * To take care of wrapping, a full ring, and the new index
+        * pointing into the second half, simply copy the old contents
+        * twice.
         *
         * +---------+    +------------------+
-        * |34567  12| -> |       1234567    |
-        * +-----p-c-+    +------------------+
+        * |34567  12| -> |34567  1234567  12|
+        * +-----p-c-+    +-------c------p---+
         */
-       p = evtchn_ring_offset(u, u->ring_prod);
-       c = evtchn_ring_offset(u, u->ring_cons);
-       if (p < c) {
-               memcpy(new_ring + c, u->ring + c, (u->ring_size - c) * sizeof(*u->ring));
-               memcpy(new_ring + u->ring_size, u->ring, p * sizeof(*u->ring));
-       } else
-               memcpy(new_ring + c, u->ring + c, (p - c) * sizeof(*u->ring));
+       memcpy(new_ring, old_ring, u->ring_size * sizeof(*u->ring));
+       memcpy(new_ring + u->ring_size, old_ring,
+              u->ring_size * sizeof(*u->ring));
 
        u->ring = new_ring;
        u->ring_size = new_size;
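
The evtchn hunk above replaces the two-chunk copy with an unconditional double copy of the old ring into the new, twice-as-large one: for any free-running counter c, new_ring[c & (2N - 1)] then holds the same entry as old_ring[c & (N - 1)], because both halves of the new ring carry the old contents. A small sketch demonstrating that property with made-up ring data.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int old_size = 8, new_size = 16;
        unsigned int ring_cons = 14, ring_prod = 19;   /* free-running counters */
        /* The entry written at counter c lives at c & (old_size - 1);
         * here each entry's value simply equals the counter that wrote it. */
        int old_ring[8] = { 16, 17, 18, 0, 0, 0, 14, 15 };
        int new_ring[16];
        unsigned int c;

        /* Copy the old contents twice, as the reworked evtchn_resize_ring() does. */
        memcpy(new_ring, old_ring, old_size * sizeof(old_ring[0]));
        memcpy(new_ring + old_size, old_ring, old_size * sizeof(old_ring[0]));

        for (c = ring_cons; c != ring_prod; c++)
            printf("counter %u: old=%d new=%d\n", c,
                   old_ring[c & (old_size - 1)],
                   new_ring[c & (new_size - 1)]);
        return 0;
    }
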
index feef8a9c4de7cf09bcbc0effb45f83c207c9015b..f02404052b7b66c73809f8bc33c93f07acab4a9b 100644 (file)
@@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx)
                .sb = inode->i_sb,
        };
        lower_file = ecryptfs_file_to_lower(file);
-       lower_file->f_pos = ctx->pos;
        rc = iterate_dir(lower_file, &buf.ctx);
        ctx->pos = buf.ctx.pos;
        if (rc < 0)
@@ -223,14 +222,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
        }
        ecryptfs_set_file_lower(
                file, ecryptfs_inode_to_private(inode)->lower_file);
-       if (d_is_dir(ecryptfs_dentry)) {
-               ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
-               mutex_lock(&crypt_stat->cs_mutex);
-               crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-               mutex_unlock(&crypt_stat->cs_mutex);
-               rc = 0;
-               goto out;
-       }
        rc = read_or_initialize_metadata(ecryptfs_dentry);
        if (rc)
                goto out_put;
@@ -247,6 +238,45 @@ out:
        return rc;
 }
 
+/**
+ * ecryptfs_dir_open
+ * @inode: inode specifying the file to open
+ * @file: Structure to return filled in
+ *
+ * Opens the file specified by inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_dir_open(struct inode *inode, struct file *file)
+{
+       struct dentry *ecryptfs_dentry = file->f_path.dentry;
+       /* Private value of ecryptfs_dentry allocated in
+        * ecryptfs_lookup() */
+       struct ecryptfs_file_info *file_info;
+       struct file *lower_file;
+
+       /* Released in ecryptfs_dir_release(), or at end of function on failure */
+       file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
+       ecryptfs_set_file_private(file, file_info);
+       if (unlikely(!file_info)) {
+               ecryptfs_printk(KERN_ERR,
+                               "Error attempting to allocate memory\n");
+               return -ENOMEM;
+       }
+       lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry),
+                                file->f_flags, current_cred());
+       if (IS_ERR(lower_file)) {
+               printk(KERN_ERR "%s: Error attempting to initialize "
+                       "the lower file for the dentry with name "
+                       "[%pd]; rc = [%ld]\n", __func__,
+                       ecryptfs_dentry, PTR_ERR(lower_file));
+               kmem_cache_free(ecryptfs_file_info_cache, file_info);
+               return PTR_ERR(lower_file);
+       }
+       ecryptfs_set_file_lower(file, lower_file);
+       return 0;
+}
+
 static int ecryptfs_flush(struct file *file, fl_owner_t td)
 {
        struct file *lower_file = ecryptfs_file_to_lower(file);
@@ -267,6 +297,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file)
        return 0;
 }
 
+static int ecryptfs_dir_release(struct inode *inode, struct file *file)
+{
+       fput(ecryptfs_file_to_lower(file));
+       kmem_cache_free(ecryptfs_file_info_cache,
+                       ecryptfs_file_to_private(file));
+       return 0;
+}
+
+static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence)
+{
+       return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence);
+}
+
 static int
 ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
@@ -346,20 +389,16 @@ const struct file_operations ecryptfs_dir_fops = {
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
 #endif
-       .open = ecryptfs_open,
-       .flush = ecryptfs_flush,
-       .release = ecryptfs_release,
+       .open = ecryptfs_dir_open,
+       .release = ecryptfs_dir_release,
        .fsync = ecryptfs_fsync,
-       .fasync = ecryptfs_fasync,
-       .splice_read = generic_file_splice_read,
-       .llseek = default_llseek,
+       .llseek = ecryptfs_dir_llseek,
 };
 
 const struct file_operations ecryptfs_main_fops = {
        .llseek = generic_file_llseek,
        .read_iter = ecryptfs_read_update_atime,
        .write_iter = generic_file_write_iter,
-       .iterate = ecryptfs_readdir,
        .unlocked_ioctl = ecryptfs_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl = ecryptfs_compat_ioctl,
index d48e0d261d78da6e1ce2830e65000e37ed4bd292..5f22e74bbadea0822e3f6f5378b732ab3c1e333d 100644 (file)
@@ -157,7 +157,7 @@ efivarfs_ioc_setxflags(struct file *file, void __user *arg)
        return 0;
 }
 
-long
+static long
 efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
 {
        void __user *arg = (void __user *)p;
index 553c5d2db4a442757b1224d3d12b77d4bc20caee..9cb54a38832de664b3ced0c4b876ececb7c907dd 100644 (file)
@@ -216,8 +216,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
 
        INIT_LIST_HEAD(&efivarfs_list);
 
-       err = efivar_init(efivarfs_callback, (void *)sb, false,
-                         true, &efivarfs_list);
+       err = efivar_init(efivarfs_callback, (void *)sb, true, &efivarfs_list);
        if (err)
                __efivar_entry_iter(efivarfs_destroy, &efivarfs_list, NULL, NULL);
 
index 719924d6c7062bf9af20a0ecd561a77841954b17..dcad5e2105252fa277a1c66ef83d8a960d2a082b 100644 (file)
@@ -1295,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
 
        *nbytesp = nbytes;
 
-       return ret;
+       return ret < 0 ? ret : 0;
 }
 
 static inline int fuse_iter_npages(const struct iov_iter *ii_p)
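
The one-line fix above collapses a positive byte count into 0: fuse_get_user_pages() is expected to return 0 on success or a negative errno, while the page-pinning helper it calls hands back the number of bytes it pinned. A tiny illustrative sketch of that calling convention (all names are invented):

#include <stdio.h>

/* Stand-in for the pinning helper: bytes pinned, or a negative errno. */
static int pin_pages(int want)
{
        return want; /* pretend all requested bytes were pinned */
}

/* The caller expects 0 on success or a negative errno, so a positive
 * byte count must be collapsed to 0 before returning. */
static int get_user_pages_like(int want)
{
        int ret = pin_pages(want);

        return ret < 0 ? ret : 0;
}

int main(void)
{
        printf("result = %d\n", get_user_pages_like(4096)); /* prints 0 */
        return 0;
}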
index 5384ceb35b1cc829442d6aa440db19c553a6eed8..98b3eb7d8eaf64d5eb006801aeb130ff8503f593 100644 (file)
@@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de,
        int retnamlen = 0;
        int truncate = 0;
        int ret = 0;
+       char *p;
+       int len;
 
        if (!ISOFS_SB(inode->i_sb)->s_rock)
                return 0;
@@ -267,12 +269,17 @@ repeat:
                                        rr->u.NM.flags);
                                break;
                        }
-                       if ((strlen(retname) + rr->len - 5) >= 254) {
+                       len = rr->len - 5;
+                       if (retnamlen + len >= 254) {
                                truncate = 1;
                                break;
                        }
-                       strncat(retname, rr->u.NM.name, rr->len - 5);
-                       retnamlen += rr->len - 5;
+                       p = memchr(rr->u.NM.name, '\0', len);
+                       if (unlikely(p))
+                               len = p - rr->u.NM.name;
+                       memcpy(retname + retnamlen, rr->u.NM.name, len);
+                       retnamlen += len;
+                       retname[retnamlen] = '\0';
                        break;
                case SIG('R', 'E'):
                        kfree(rs.buffer);
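
The rewritten NM handling above clamps the component length against the output buffer and truncates at an embedded NUL instead of trusting strlen()/strncat(). A small userspace sketch of that bounded-append pattern (buffer size and sample payload are made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char retname[254 + 1] = "";
        int retnamlen = 0;

        /* Hypothetical NM payload with an embedded NUL and a stated length. */
        const char name[6] = { 'f', 'o', 'o', '\0', 'X', 'X' };
        int len = (int)sizeof(name);

        if (retnamlen + len < 254) {
                const char *p = memchr(name, '\0', len);

                if (p)
                        len = (int)(p - name); /* stop at the embedded NUL */
                memcpy(retname + retnamlen, name, len);
                retnamlen += len;
                retname[retnamlen] = '\0';
        }
        printf("assembled \"%s\" (%d bytes)\n", retname, retnamlen); /* "foo", 3 */
        return 0;
}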
index 03b688d19f6964010c27c16759520315892c780d..37f9678ae4df5c191e1870de88beff21395d4839 100644 (file)
@@ -153,9 +153,9 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
        p = buf + len + nlen;
        *p = '\0';
        for (kn = kn_to; kn != common; kn = kn->parent) {
-               nlen = strlen(kn->name);
-               p -= nlen;
-               memcpy(p, kn->name, nlen);
+               size_t tmp = strlen(kn->name);
+               p -= tmp;
+               memcpy(p, kn->name, tmp);
                *(--p) = '/';
        }
 
index f73541fbe7afadaee17dcb038f7be18c21b4d18f..3d670a3678f2dd6dc24dc9db23ca9cd5a63ab0ed 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/namei.h>
+#include <linux/seq_file.h>
 
 #include "kernfs-internal.h"
 
@@ -40,6 +41,19 @@ static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry)
        return 0;
 }
 
+static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
+{
+       struct kernfs_node *node = dentry->d_fsdata;
+       struct kernfs_root *root = kernfs_root(node);
+       struct kernfs_syscall_ops *scops = root->syscall_ops;
+
+       if (scops && scops->show_path)
+               return scops->show_path(sf, node, root);
+
+       seq_dentry(sf, dentry, " \t\n\\");
+       return 0;
+}
+
 const struct super_operations kernfs_sops = {
        .statfs         = simple_statfs,
        .drop_inode     = generic_delete_inode,
@@ -47,6 +61,7 @@ const struct super_operations kernfs_sops = {
 
        .remount_fs     = kernfs_sop_remount_fs,
        .show_options   = kernfs_sop_show_options,
+       .show_path      = kernfs_sop_show_path,
 };
 
 /**
index 1d9ca2d5dff68ee184bf5dbe6d0d417f1b63f245..42f8ca038254078f4cb37d1827f9c3b2da8c85ba 100644 (file)
@@ -1794,30 +1794,49 @@ static inline unsigned int fold_hash(unsigned long hash)
        return hash_64(hash, 32);
 }
 
+/*
+ * This is George Marsaglia's XORSHIFT generator.
+ * It implements a maximum-period LFSR in only a few
+ * instructions.  It also has the property (required
+ * by hash_name()) that mix_hash(0) = 0.
+ */
+static inline unsigned long mix_hash(unsigned long hash)
+{
+       hash ^= hash << 13;
+       hash ^= hash >> 7;
+       hash ^= hash << 17;
+       return hash;
+}
+
 #else  /* 32-bit case */
 
 #define fold_hash(x) (x)
 
+static inline unsigned long mix_hash(unsigned long hash)
+{
+       hash ^= hash << 13;
+       hash ^= hash >> 17;
+       hash ^= hash << 5;
+       return hash;
+}
+
 #endif
 
 unsigned int full_name_hash(const unsigned char *name, unsigned int len)
 {
-       unsigned long a, mask;
-       unsigned long hash = 0;
+       unsigned long a, hash = 0;
 
        for (;;) {
                a = load_unaligned_zeropad(name);
                if (len < sizeof(unsigned long))
                        break;
-               hash += a;
-               hash *= 9;
+               hash = mix_hash(hash + a);
                name += sizeof(unsigned long);
                len -= sizeof(unsigned long);
                if (!len)
                        goto done;
        }
-       mask = bytemask_from_count(len);
-       hash += mask & a;
+       hash += a & bytemask_from_count(len);
 done:
        return fold_hash(hash);
 }
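
For reference, a standalone sketch of the 64-bit mix_hash() added above, using the same shift constants: xorshift is invertible, so it never collapses distinct words, and mix_hash(0) == 0, which the hash_name() loop relies on. This is an illustrative userspace program, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Same shift constants as the 64-bit mix_hash() above. */
static uint64_t mix_hash(uint64_t hash)
{
        hash ^= hash << 13;
        hash ^= hash >> 7;
        hash ^= hash << 17;
        return hash;
}

int main(void)
{
        printf("mix_hash(0) = %llu\n", (unsigned long long)mix_hash(0)); /* 0 */
        printf("mix_hash(1) = %#llx\n", (unsigned long long)mix_hash(1));
        printf("mix_hash(2) = %#llx\n", (unsigned long long)mix_hash(2));
        return 0;
}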

@@ -1835,7 +1854,7 @@ static inline u64 hash_name(const char *name)
        hash = a = 0;
        len = -sizeof(unsigned long);
        do {
-               hash = (hash + a) * 9;
+               hash = mix_hash(hash + a);
                len += sizeof(unsigned long);
                a = load_unaligned_zeropad(name+len);
                b = a ^ REPEAT_BYTE('/');
@@ -2266,6 +2285,33 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
 }
 EXPORT_SYMBOL(vfs_path_lookup);
 
+/**
+ * lookup_hash - lookup single pathname component on already hashed name
+ * @name:      name and hash to lookup
+ * @base:      base directory to lookup from
+ *
+ * The name must have been verified and hashed (see lookup_one_len()).  Using
+ * this after just full_name_hash() is unsafe.
+ *
+ * This function also doesn't check for search permission on base directory.
+ *
+ * Use lookup_one_len_unlocked() instead, unless you really know what you are
+ * doing.
+ *
+ * Do not hold i_mutex; this helper takes i_mutex if necessary.
+ */
+struct dentry *lookup_hash(const struct qstr *name, struct dentry *base)
+{
+       struct dentry *ret;
+
+       ret = lookup_dcache(name, base, 0);
+       if (!ret)
+               ret = lookup_slow(name, base, 0);
+
+       return ret;
+}
+EXPORT_SYMBOL(lookup_hash);
+
 /**
  * lookup_one_len - filesystem helper to lookup single pathname component
  * @name:      pathname component to lookup
@@ -2337,7 +2383,6 @@ struct dentry *lookup_one_len_unlocked(const char *name,
        struct qstr this;
        unsigned int c;
        int err;
-       struct dentry *ret;
 
        this.name = name;
        this.len = len;
@@ -2369,10 +2414,7 @@ struct dentry *lookup_one_len_unlocked(const char *name,
        if (err)
                return ERR_PTR(err);
 
-       ret = lookup_dcache(&this, base, 0);
-       if (!ret)
-               ret = lookup_slow(&this, base, 0);
-       return ret;
+       return lookup_hash(&this, base);
 }
 EXPORT_SYMBOL(lookup_one_len_unlocked);
 
@@ -2942,22 +2984,10 @@ no_open:
                dentry = lookup_real(dir, dentry, nd->flags);
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);
-
-               if (create_error) {
-                       int open_flag = op->open_flag;
-
-                       error = create_error;
-                       if ((open_flag & O_EXCL)) {
-                               if (!dentry->d_inode)
-                                       goto out;
-                       } else if (!dentry->d_inode) {
-                               goto out;
-                       } else if ((open_flag & O_TRUNC) &&
-                                  d_is_reg(dentry)) {
-                               goto out;
-                       }
-                       /* will fail later, go on to get the right error */
-               }
+       }
+       if (create_error && !dentry->d_inode) {
+               error = create_error;
+               goto out;
        }
 looked_up:
        path->dentry = dentry;
@@ -4213,7 +4243,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
        bool new_is_dir = false;
        unsigned max_links = new_dir->i_sb->s_max_links;
 
-       if (source == target)
+       /*
+        * Check source == target.
+        * On overlayfs we need to compare the underlying inodes.
+        */
+       if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0))
                return 0;
 
        error = may_delete(old_dir, old_dentry, is_dir);
index 0cdf497c91efbb915512aceed2bf58acaa37fa1d..2162434728c022ab4651904b778c21d958ca802d 100644 (file)
@@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
        brelse(di_bh);
        return acl;
 }
+
+int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
+{
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct posix_acl *acl;
+       int ret;
+
+       if (S_ISLNK(inode->i_mode))
+               return -EOPNOTSUPP;
+
+       if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
+               return 0;
+
+       acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
+       if (IS_ERR(acl) || !acl)
+               return PTR_ERR(acl);
+       ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
+       if (ret)
+               return ret;
+       ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS,
+                           acl, NULL, NULL);
+       posix_acl_release(acl);
+       return ret;
+}
+
+/*
+ * Initialize the ACLs of a new inode. If the parent directory has a default
+ * ACL, clone it to the new inode. Called from ocfs2_mknod.
+ */
+int ocfs2_init_acl(handle_t *handle,
+                  struct inode *inode,
+                  struct inode *dir,
+                  struct buffer_head *di_bh,
+                  struct buffer_head *dir_bh,
+                  struct ocfs2_alloc_context *meta_ac,
+                  struct ocfs2_alloc_context *data_ac)
+{
+       struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
+       struct posix_acl *acl = NULL;
+       int ret = 0, ret2;
+       umode_t mode;
+
+       if (!S_ISLNK(inode->i_mode)) {
+               if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
+                       acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
+                                                  dir_bh);
+                       if (IS_ERR(acl))
+                               return PTR_ERR(acl);
+               }
+               if (!acl) {
+                       mode = inode->i_mode & ~current_umask();
+                       ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+                       if (ret) {
+                               mlog_errno(ret);
+                               goto cleanup;
+                       }
+               }
+       }
+       if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
+               if (S_ISDIR(inode->i_mode)) {
+                       ret = ocfs2_set_acl(handle, inode, di_bh,
+                                           ACL_TYPE_DEFAULT, acl,
+                                           meta_ac, data_ac);
+                       if (ret)
+                               goto cleanup;
+               }
+               mode = inode->i_mode;
+               ret = __posix_acl_create(&acl, GFP_NOFS, &mode);
+               if (ret < 0)
+                       return ret;
+
+               ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+               if (ret2) {
+                       mlog_errno(ret2);
+                       ret = ret2;
+                       goto cleanup;
+               }
+               if (ret > 0) {
+                       ret = ocfs2_set_acl(handle, inode,
+                                           di_bh, ACL_TYPE_ACCESS,
+                                           acl, meta_ac, data_ac);
+               }
+       }
+cleanup:
+       posix_acl_release(acl);
+       return ret;
+}
index 3fce68d086251a6e26ea9805361e3a1ccb351d46..2783a75b3999e3c6a548bc44c4645ac882048ae6 100644 (file)
@@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle,
                         struct posix_acl *acl,
                         struct ocfs2_alloc_context *meta_ac,
                         struct ocfs2_alloc_context *data_ac);
+extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *);
+extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *,
+                         struct buffer_head *, struct buffer_head *,
+                         struct ocfs2_alloc_context *,
+                         struct ocfs2_alloc_context *);
 
 #endif /* OCFS2_ACL_H */
index 5308841756be24f6e682e361a35f1f7db00f9568..59cce53c91d810e208ef92896e7e2a9ae5806a27 100644 (file)
@@ -1268,20 +1268,20 @@ bail_unlock_rw:
        if (size_change)
                ocfs2_rw_unlock(inode, 1);
 bail:
-       brelse(bh);
 
        /* Release quota pointers in case we acquired them */
        for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
                dqput(transfer_to[qtype]);
 
        if (!status && attr->ia_valid & ATTR_MODE) {
-               status = posix_acl_chmod(inode, inode->i_mode);
+               status = ocfs2_acl_chmod(inode, bh);
                if (status < 0)
                        mlog_errno(status);
        }
        if (inode_locked)
                ocfs2_inode_unlock(inode, 1);
 
+       brelse(bh);
        return status;
 }
 
index 6b3e87189a6467fd3c72533db5c52d149ba2064d..a8f1225e6d9b767f5551bca0699f8e5e8f045e4c 100644 (file)
@@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir,
        struct ocfs2_dir_lookup_result lookup = { NULL, };
        sigset_t oldset;
        int did_block_signals = 0;
-       struct posix_acl *default_acl = NULL, *acl = NULL;
        struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
@@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir,
                goto leave;
        }
 
-       status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
-       if (status) {
-               mlog_errno(status);
-               goto leave;
-       }
-
        handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb,
                                                            S_ISDIR(mode),
                                                            xattr_credits));
@@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir,
                inc_nlink(dir);
        }
 
-       if (default_acl) {
-               status = ocfs2_set_acl(handle, inode, new_fe_bh,
-                                      ACL_TYPE_DEFAULT, default_acl,
-                                      meta_ac, data_ac);
-       }
-       if (!status && acl) {
-               status = ocfs2_set_acl(handle, inode, new_fe_bh,
-                                      ACL_TYPE_ACCESS, acl,
-                                      meta_ac, data_ac);
-       }
+       status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh,
+                        meta_ac, data_ac);
 
        if (status < 0) {
                mlog_errno(status);
@@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir,
        d_instantiate(dentry, inode);
        status = 0;
 leave:
-       if (default_acl)
-               posix_acl_release(default_acl);
-       if (acl)
-               posix_acl_release(acl);
        if (status < 0 && did_quota_inode)
                dquot_free_inode(inode);
        if (handle)
index 744d5d90c363a182812e2c7de7ecf4971452ce3c..92bbe93bfe1077332286d5a16fb5ea759c66c466 100644 (file)
@@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
        struct inode *inode = d_inode(old_dentry);
        struct buffer_head *old_bh = NULL;
        struct inode *new_orphan_inode = NULL;
-       struct posix_acl *default_acl, *acl;
-       umode_t mode;
 
        if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)))
                return -EOPNOTSUPP;
 
-       mode = inode->i_mode;
-       error = posix_acl_create(dir, &mode, &default_acl, &acl);
-       if (error) {
-               mlog_errno(error);
-               return error;
-       }
 
-       error = ocfs2_create_inode_in_orphan(dir, mode,
+       error = ocfs2_create_inode_in_orphan(dir, inode->i_mode,
                                             &new_orphan_inode);
        if (error) {
                mlog_errno(error);
@@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
        /* If the security isn't preserved, we need to re-initialize them. */
        if (!preserve) {
                error = ocfs2_init_security_and_acl(dir, new_orphan_inode,
-                                                   &new_dentry->d_name,
-                                                   default_acl, acl);
+                                                   &new_dentry->d_name);
                if (error)
                        mlog_errno(error);
        }
 out:
-       if (default_acl)
-               posix_acl_release(default_acl);
-       if (acl)
-               posix_acl_release(acl);
        if (!error) {
                error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode,
                                                       new_dentry);
index 7d3d979f57d9142169f93f2c88b27a612dc06dc4..f19b7381a9984a6f85194c503f8f22c591048df9 100644 (file)
@@ -7216,12 +7216,10 @@ out:
  */
 int ocfs2_init_security_and_acl(struct inode *dir,
                                struct inode *inode,
-                               const struct qstr *qstr,
-                               struct posix_acl *default_acl,
-                               struct posix_acl *acl)
+                               const struct qstr *qstr)
 {
-       struct buffer_head *dir_bh = NULL;
        int ret = 0;
+       struct buffer_head *dir_bh = NULL;
 
        ret = ocfs2_init_security_get(inode, dir, qstr, NULL);
        if (ret) {
@@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir,
                mlog_errno(ret);
                goto leave;
        }
-
-       if (!ret && default_acl)
-               ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
-       if (!ret && acl)
-               ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS);
+       ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL);
+       if (ret)
+               mlog_errno(ret);
 
        ocfs2_inode_unlock(dir, 0);
        brelse(dir_bh);
index f10d5b93c366c8a7d12ddc1c90766ea88ed3dc56..1633cc15ea1fdf75c7a507d8f5f60b921bf902e6 100644 (file)
@@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode,
                         bool preserve_security);
 int ocfs2_init_security_and_acl(struct inode *dir,
                                struct inode *inode,
-                               const struct qstr *qstr,
-                               struct posix_acl *default_acl,
-                               struct posix_acl *acl);
+                               const struct qstr *qstr);
 #endif /* OCFS2_XATTR_H */
index 17cb6b1dab753b9de6366f92b6ca3ba3cb204291..081d3d6df74ba54ccd0e1d754640b218e54f4111 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path);
 int vfs_open(const struct path *path, struct file *file,
             const struct cred *cred)
 {
-       struct dentry *dentry = path->dentry;
-       struct inode *inode = dentry->d_inode;
+       struct inode *inode = vfs_select_inode(path->dentry, file->f_flags);
 
-       file->f_path = *path;
-       if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
-               inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
-               if (IS_ERR(inode))
-                       return PTR_ERR(inode);
-       }
+       if (IS_ERR(inode))
+               return PTR_ERR(inode);
 
+       file->f_path = *path;
        return do_dentry_open(file, inode, NULL, cred);
 }
 
index 5d972e6cd3fe97fcae5c782b59c72c0007668566..791235e03d1712ed92b62bc405483dd2e1ab972b 100644 (file)
@@ -411,9 +411,7 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
 {
        struct dentry *dentry;
 
-       inode_lock(dir->d_inode);
-       dentry = lookup_one_len(name->name, dir, name->len);
-       inode_unlock(dir->d_inode);
+       dentry = lookup_hash(name, dir);
 
        if (IS_ERR(dentry)) {
                if (PTR_ERR(dentry) == -ENOENT)
index c524fdddc7fb1f601d6d06ddb46d0affcca89c08..99899705b1055411723c3e5919ecbc1f50139e14 100644 (file)
@@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin)
 
 /* all accesses are serialized by namespace_sem */
 static struct user_namespace *user_ns;
-static struct mount *last_dest, *last_source, *dest_master;
+static struct mount *last_dest, *first_source, *last_source, *dest_master;
 static struct mountpoint *mp;
 static struct hlist_head *list;
 
@@ -221,20 +221,22 @@ static int propagate_one(struct mount *m)
                type = CL_MAKE_SHARED;
        } else {
                struct mount *n, *p;
+               bool done;
                for (n = m; ; n = p) {
                        p = n->mnt_master;
-                       if (p == dest_master || IS_MNT_MARKED(p)) {
-                               while (last_dest->mnt_master != p) {
-                                       last_source = last_source->mnt_master;
-                                       last_dest = last_source->mnt_parent;
-                               }
-                               if (!peers(n, last_dest)) {
-                                       last_source = last_source->mnt_master;
-                                       last_dest = last_source->mnt_parent;
-                               }
+                       if (p == dest_master || IS_MNT_MARKED(p))
                                break;
-                       }
                }
+               do {
+                       struct mount *parent = last_source->mnt_parent;
+                       if (last_source == first_source)
+                               break;
+                       done = parent->mnt_master == p;
+                       if (done && peers(n, parent))
+                               break;
+                       last_source = last_source->mnt_master;
+               } while (!done);
+
                type = CL_SLAVE;
                /* beginning of peer group among the slaves? */
                if (IS_MNT_SHARED(m))
@@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
         */
        user_ns = current->nsproxy->mnt_ns->user_ns;
        last_dest = dest_mnt;
+       first_source = source_mnt;
        last_source = source_mnt;
        mp = dest_mp;
        list = tree_list;
index b1755b23893e5e34513582d51d7a9cf9ed6b7227..0d163a84082dbddc846a871a46cfd75aa1eddfb8 100644 (file)
@@ -434,7 +434,7 @@ static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns,
                        && !lookup_symbol_name(wchan, symname))
                seq_printf(m, "%s", symname);
        else
-               seq_puts(m, "0\n");
+               seq_putc(m, '0');
 
        return 0;
 }
@@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf,
        struct mm_struct *mm = file->private_data;
        unsigned long env_start, env_end;
 
-       if (!mm)
+       /* Ensure the process spawned far enough to have an environment. */
+       if (!mm || !mm->env_end)
                return 0;
 
        page = (char *)__get_free_page(GFP_TEMPORARY);
index b018eb485019b1ed1bed2ccdc6c7b9914c575963..dd9bf7e410d2975f212accdf827df8a47c2d4c94 100644 (file)
@@ -1143,6 +1143,9 @@ static long do_splice_to(struct file *in, loff_t *ppos,
        if (unlikely(ret < 0))
                return ret;
 
+       if (unlikely(len > MAX_RW_COUNT))
+               len = MAX_RW_COUNT;
+
        if (in->f_op->splice_read)
                splice_read = in->f_op->splice_read;
        else
index fa92fe839fda2f989e3d52c368cc7520b80679e5..36661acaf33b4f61d27f8f48d467ced432521b3c 100644 (file)
@@ -919,14 +919,14 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 #endif
        }
 
-       ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+       ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
        if (ret < 0)
                goto out_bh;
 
        strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
        udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 
-       ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+       ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
        if (ret < 0)
                goto out_bh;
 
index 972b70625614f837310e39d41f6d8e836d205144..263829ef1873644a16ac3340555291a7fc240a4a 100644 (file)
@@ -212,7 +212,7 @@ extern int udf_get_filename(struct super_block *, const uint8_t *, int,
                            uint8_t *, int);
 extern int udf_put_filename(struct super_block *, const uint8_t *, int,
                            uint8_t *, int);
-extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
+extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int);
 
 /* ialloc.c */
 extern void udf_free_inode(struct inode *);
index 3ff42f4437f3eb3374fea6b1cd0e839b71b65577..695389a4fc239f245cfacfa5a1e2fde9eae5b4a7 100644 (file)
@@ -335,9 +335,21 @@ try_again:
        return u_len;
 }
 
-int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
+                     const uint8_t *ocu_i, int i_len)
 {
-       return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+       int s_len = 0;
+
+       if (i_len > 0) {
+               s_len = ocu_i[i_len - 1];
+               if (s_len >= i_len) {
+                       pr_err("incorrect dstring lengths (%d/%d)\n",
+                              s_len, i_len);
+                       return -EINVAL;
+               }
+       }
+
+       return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len,
                                 udf_uni2char_utf8, 0);
 }
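
udf_dstrCS0toUTF8() treats its input as an OSTA dstring, where the final byte of the fixed-size field records how many bytes are actually in use; the new check rejects a length byte that claims more than the field can hold. A hedged userspace sketch of that layout (field size and contents are invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Invented 32-byte dstring field: byte 0 is the compression ID,
         * the last byte records how many bytes of the field are used. */
        uint8_t field[32] = { 0x08, 'L', 'i', 'n', 'u', 'x', 'U', 'D', 'F' };
        int i_len = (int)sizeof(field);
        int s_len;

        field[i_len - 1] = 9; /* compression ID + 8 characters */

        s_len = field[i_len - 1];
        if (s_len >= i_len) {
                fprintf(stderr, "incorrect dstring lengths (%d/%d)\n",
                        s_len, i_len);
                return 1;
        }
        printf("decode only the first %d of %d bytes\n", s_len, i_len);
        return 0;
}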
 
index 14362a84c78e3ca1d5712d284d6dce22ed656ba1..3a932501d69078951ee4e70c1acd4ae429ca2a85 100644 (file)
@@ -394,13 +394,13 @@ struct acpi_data_node {
 
 static inline bool is_acpi_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && (fwnode->type == FWNODE_ACPI
+       return !IS_ERR_OR_NULL(fwnode) && (fwnode->type == FWNODE_ACPI
                || fwnode->type == FWNODE_ACPI_DATA);
 }
 
 static inline bool is_acpi_device_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_ACPI;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_ACPI;
 }
 
 static inline struct acpi_device *to_acpi_device_node(struct fwnode_handle *fwnode)
index 21ee41b92e8aaad2030f0d86c614dc4bfc4b5c5a..f1d5c5acc8dd58cd92df76915391b4737d90c083 100644 (file)
@@ -171,12 +171,13 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl);
 void bpf_register_map_type(struct bpf_map_type_list *tl);
 
 struct bpf_prog *bpf_prog_get(u32 ufd);
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
 void bpf_prog_put_rcu(struct bpf_prog *prog);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
-void bpf_map_inc(struct bpf_map *map, bool uref);
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
index eeae401a2412e5e8d32381805be1c13798b7a8fb..3d5202eda22f262e5c134bfb1f55b7e248dfcd79 100644 (file)
 #define __HAVE_BUILTIN_BSWAP32__
 #define __HAVE_BUILTIN_BSWAP64__
 #endif
-#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
+#if GCC_VERSION >= 40800
 #define __HAVE_BUILTIN_BSWAP16__
 #endif
 #endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
index 4bb4de8d95ea5869000db22baa662451c6acd1a0..7e9422cb5989c2e84ddd19eba2c91041f7b8622f 100644 (file)
@@ -565,4 +565,16 @@ static inline struct dentry *d_real(struct dentry *dentry)
                return dentry;
 }
 
+static inline struct inode *vfs_select_inode(struct dentry *dentry,
+                                            unsigned open_flags)
+{
+       struct inode *inode = d_inode(dentry);
+
+       if (inode && unlikely(dentry->d_flags & DCACHE_OP_SELECT_INODE))
+               inode = dentry->d_op->d_select_inode(dentry, open_flags);
+
+       return inode;
+}
+
+
 #endif /* __LINUX_DCACHE_H */
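
vfs_select_inode() centralizes the "ask d_op->d_select_inode() when the dentry carries DCACHE_OP_SELECT_INODE" pattern, so vfs_open() and the vfs_rename() identity check share one helper and one error path. A toy userspace sketch of the same dispatch shape; every name below is an invented stand-in, not the VFS API:

#include <stdio.h>

struct toy_inode { int id; };

#define TOY_OP_SELECT_INODE 0x1

struct toy_dentry {
        struct toy_inode *inode;
        unsigned int flags;
        /* Optional hook, analogous to d_op->d_select_inode(). */
        struct toy_inode *(*select_inode)(struct toy_dentry *, unsigned int);
};

static struct toy_inode *toy_select_inode(struct toy_dentry *d,
                                          unsigned int open_flags)
{
        struct toy_inode *inode = d->inode;

        if (inode && (d->flags & TOY_OP_SELECT_INODE))
                inode = d->select_inode(d, open_flags);
        return inode; /* the kernel helper may also return an ERR_PTR */
}

static struct toy_inode upper = { .id = 1 };
static struct toy_inode lower = { .id = 42 };

static struct toy_inode *pick_lower(struct toy_dentry *d, unsigned int flags)
{
        (void)d; (void)flags;
        return &lower;
}

int main(void)
{
        struct toy_dentry plain = { .inode = &upper };
        struct toy_dentry overlay = { .inode = &upper,
                                      .flags = TOY_OP_SELECT_INODE,
                                      .select_inode = pick_lower };

        printf("plain:   inode %d\n", toy_select_inode(&plain, 0)->id);   /* 1 */
        printf("overlay: inode %d\n", toy_select_inode(&overlay, 0)->id); /* 42 */
        return 0;
}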
index 1626474567ac503d26802fcefbd9f147605e4449..df7acb51f3cc7d26668fb9a751b52ea8b53acf5d 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/pfn.h>
 #include <linux/pstore.h>
 #include <linux/reboot.h>
+#include <linux/screen_info.h>
 
 #include <asm/page.h>
 
@@ -123,6 +124,13 @@ typedef struct {
        u32 imagesize;
 } efi_capsule_header_t;
 
+/*
+ * EFI capsule flags
+ */
+#define EFI_CAPSULE_PERSIST_ACROSS_RESET       0x00010000
+#define EFI_CAPSULE_POPULATE_SYSTEM_TABLE      0x00020000
+#define EFI_CAPSULE_INITIATE_RESET             0x00040000
+
 /*
  * Allocation types for calls to boottime->allocate_pages.
  */
@@ -282,9 +290,10 @@ typedef struct {
        efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
        void *__reserved;
        void *register_protocol_notify;
-       void *locate_handle;
+       efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
+                                     unsigned long *, efi_handle_t *);
        void *locate_device_path;
-       void *install_configuration_table;
+       efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
        void *load_image;
        void *start_image;
        void *exit;
@@ -623,6 +632,27 @@ void efi_native_runtime_setup(void);
        EFI_GUID(0x3152bca5, 0xeade, 0x433d, \
                 0x86, 0x2e, 0xc0, 0x1c, 0xdc, 0x29, 0x1f, 0x44)
 
+#define EFI_MEMORY_ATTRIBUTES_TABLE_GUID \
+       EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, \
+                0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20)
+
+#define EFI_CONSOLE_OUT_DEVICE_GUID \
+       EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, \
+                0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
+
+/*
+ * This GUID is used to hand the struct screen_info populated by the stub,
+ * based on the GOP protocol instance associated with ConOut, over to the
+ * kernel proper.
+ */
+#define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID \
+       EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, \
+                0xb9, 0xe, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95)
+
+#define LINUX_EFI_LOADER_ENTRY_GUID \
+       EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, \
+                0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f)
+
 typedef struct {
        efi_guid_t guid;
        u64 table;
@@ -847,6 +877,14 @@ typedef struct {
 
 #define EFI_INVALID_TABLE_ADDR         (~0UL)
 
+typedef struct {
+       u32 version;
+       u32 num_entries;
+       u32 desc_size;
+       u32 reserved;
+       efi_memory_desc_t entry[0];
+} efi_memory_attributes_table_t;
+
 /*
  * All runtime access to EFI goes through this structure:
  */
@@ -868,6 +906,7 @@ extern struct efi {
        unsigned long config_table;     /* config tables */
        unsigned long esrt;             /* ESRT table */
        unsigned long properties_table; /* properties table */
+       unsigned long mem_attr_table;   /* memory attributes table */
        efi_get_time_t *get_time;
        efi_set_time_t *set_time;
        efi_get_wakeup_time_t *get_wakeup_time;
@@ -883,7 +922,7 @@ extern struct efi {
        efi_get_next_high_mono_count_t *get_next_high_mono_count;
        efi_reset_system_t *reset_system;
        efi_set_virtual_address_map_t *set_virtual_address_map;
-       struct efi_memory_map *memmap;
+       struct efi_memory_map memmap;
        unsigned long flags;
 } efi;
 
@@ -945,7 +984,6 @@ extern void efi_initialize_iomem_resources(struct resource *code_resource,
 extern void efi_get_time(struct timespec *now);
 extern void efi_reserve_boot_services(void);
 extern int efi_get_fdt_params(struct efi_fdt_params *params);
-extern struct efi_memory_map memmap;
 extern struct kobject *efi_kobj;
 
 extern int efi_reboot_quirk_mode;
@@ -957,12 +995,34 @@ extern void __init efi_fake_memmap(void);
 static inline void efi_fake_memmap(void) { }
 #endif
 
+/*
+ * efi_memattr_perm_setter - arch specific callback function passed into
+ *                           efi_memattr_apply_permissions() that updates the
+ *                           mapping permissions described by the second
+ *                           argument in the page tables referred to by the
+ *                           first argument.
+ */
+typedef int (*efi_memattr_perm_setter)(struct mm_struct *, efi_memory_desc_t *);
+
+extern int efi_memattr_init(void);
+extern int efi_memattr_apply_permissions(struct mm_struct *mm,
+                                        efi_memattr_perm_setter fn);
+
 /* Iterate through an efi_memory_map */
-#define for_each_efi_memory_desc(m, md)                                           \
+#define for_each_efi_memory_desc_in_map(m, md)                            \
        for ((md) = (m)->map;                                              \
             (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
             (md) = (void *)(md) + (m)->desc_size)
 
+/**
+ * for_each_efi_memory_desc - iterate over descriptors in efi.memmap
+ * @md: the efi_memory_desc_t * iterator
+ *
+ * Once the loop finishes @md must not be accessed.
+ */
+#define for_each_efi_memory_desc(md) \
+       for_each_efi_memory_desc_in_map(&efi.memmap, md)
+
 /*
  * Format an EFI memory descriptor's type and attributes to a user-provided
  * character buffer, as per snprintf(), and return the buffer.
@@ -1000,7 +1060,6 @@ extern int __init efi_setup_pcdp_console(char *);
  * possible, remove EFI-related code altogether.
  */
 #define EFI_BOOT               0       /* Were we booted from EFI? */
-#define EFI_SYSTEM_TABLES      1       /* Can we use EFI system tables? */
 #define EFI_CONFIG_TABLES      2       /* Can we use EFI config tables? */
 #define EFI_RUNTIME_SERVICES   3       /* Can we use runtime services? */
 #define EFI_MEMMAP             4       /* Can we use EFI memory map? */
@@ -1026,8 +1085,16 @@ static inline bool efi_enabled(int feature)
 }
 static inline void
 efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {}
+
+static inline bool
+efi_capsule_pending(int *reset_type)
+{
+       return false;
+}
 #endif
 
+extern int efi_status_to_err(efi_status_t status);
+
 /*
  * Variable Attributes
  */
@@ -1180,6 +1247,80 @@ struct efi_simple_text_output_protocol {
        void *test_string;
 };
 
+#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR              0
+#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR              1
+#define PIXEL_BIT_MASK                                 2
+#define PIXEL_BLT_ONLY                                 3
+#define PIXEL_FORMAT_MAX                               4
+
+struct efi_pixel_bitmask {
+       u32 red_mask;
+       u32 green_mask;
+       u32 blue_mask;
+       u32 reserved_mask;
+};
+
+struct efi_graphics_output_mode_info {
+       u32 version;
+       u32 horizontal_resolution;
+       u32 vertical_resolution;
+       int pixel_format;
+       struct efi_pixel_bitmask pixel_information;
+       u32 pixels_per_scan_line;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_32 {
+       u32 max_mode;
+       u32 mode;
+       u32 info;
+       u32 size_of_info;
+       u64 frame_buffer_base;
+       u32 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode_64 {
+       u32 max_mode;
+       u32 mode;
+       u64 info;
+       u64 size_of_info;
+       u64 frame_buffer_base;
+       u64 frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_mode {
+       u32 max_mode;
+       u32 mode;
+       unsigned long info;
+       unsigned long size_of_info;
+       u64 frame_buffer_base;
+       unsigned long frame_buffer_size;
+} __packed;
+
+struct efi_graphics_output_protocol_32 {
+       u32 query_mode;
+       u32 set_mode;
+       u32 blt;
+       u32 mode;
+};
+
+struct efi_graphics_output_protocol_64 {
+       u64 query_mode;
+       u64 set_mode;
+       u64 blt;
+       u64 mode;
+};
+
+struct efi_graphics_output_protocol {
+       unsigned long query_mode;
+       unsigned long set_mode;
+       unsigned long blt;
+       struct efi_graphics_output_protocol_mode *mode;
+};
+
+typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
+       struct efi_graphics_output_protocol *, u32, unsigned long *,
+       struct efi_graphics_output_mode_info **);
+
 extern struct list_head efivar_sysfs_list;
 
 static inline void
@@ -1195,8 +1336,7 @@ int efivars_unregister(struct efivars *efivars);
 struct kobject *efivars_kobject(void);
 
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
-               void *data, bool atomic, bool duplicates,
-               struct list_head *head);
+               void *data, bool duplicates, struct list_head *head);
 
 void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
 void efivar_entry_remove(struct efivar_entry *entry);
@@ -1242,6 +1382,13 @@ int efivars_sysfs_init(void);
 #define EFIVARS_DATA_SIZE_MAX 1024
 
 #endif /* CONFIG_EFI_VARS */
+extern bool efi_capsule_pending(int *reset_type);
+
+extern int efi_capsule_supported(efi_guid_t guid, u32 flags,
+                                size_t size, int *reset);
+
+extern int efi_capsule_update(efi_capsule_header_t *capsule,
+                             struct page **pages);
 
 #ifdef CONFIG_EFI_RUNTIME_MAP
 int efi_runtime_map_init(struct kobject *);
@@ -1319,5 +1466,9 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
 
 efi_status_t efi_parse_options(char *cmdline);
 
+efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
+                          struct screen_info *si, efi_guid_t *proto,
+                          unsigned long size);
+
 bool efi_runtime_disabled(void);
 #endif /* _LINUX_EFI_H */
index 1afde47e1528c36ad8613c2e56fb23d1f614e476..79c52fa81cac9ea2dd3a1bea6e1093cd81fdb79c 100644 (file)
 #error Wordsize not 32 or 64
 #endif
 
+/*
+ * The above primes are actively bad for hashing, since they are
+ * too sparse. The 32-bit one is mostly ok, the 64-bit one causes
+ * real problems. Besides, the "prime" part is pointless for the
+ * multiplicative hash.
+ *
+ * Although a random odd number will do, it turns out that the golden
+ * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
+ * properties.
+ *
+ * These are the negative, (1 - phi) = (phi^2) = (3 - sqrt(5))/2.
+ * (See Knuth vol 3, section 6.4, exercise 9.)
+ */
+#define GOLDEN_RATIO_32 0x61C88647
+#define GOLDEN_RATIO_64 0x61C8864680B583EBull
+
 static __always_inline u64 hash_64(u64 val, unsigned int bits)
 {
        u64 hash = val;
 
-#if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
-       hash = hash * GOLDEN_RATIO_PRIME_64;
+#if BITS_PER_LONG == 64
+       hash = hash * GOLDEN_RATIO_64;
 #else
        /*  Sigh, gcc can't optimise this alone like it does for 32 bits. */
        u64 n = hash;
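
The hunk above switches hash_64() to an unconditional multiply by GOLDEN_RATIO_64. As an illustration of multiplicative (Fibonacci-style) hashing with that constant, here is a minimal userspace sketch; it mirrors the multiply-and-take-top-bits shape but is not the kernel implementation, and assumes 0 < bits <= 32:

#include <stdio.h>
#include <stdint.h>

#define GOLDEN_RATIO_64 0x61C8864680B583EBull

/* Multiply by the 64-bit golden-ratio constant and keep the top 'bits' bits. */
static uint32_t hash_64_sketch(uint64_t val, unsigned int bits)
{
        return (uint32_t)((val * GOLDEN_RATIO_64) >> (64 - bits));
}

int main(void)
{
        for (uint64_t v = 0; v < 4; v++)
                printf("hash_64_sketch(%llu, 10) = %u\n",
                       (unsigned long long)v, hash_64_sketch(v, 10));
        return 0;
}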
index d5569734f6724d6194497fe9842e9aac56ef0b09..548fd535fd02399634bace0607471d07b973d8dc 100644 (file)
@@ -28,6 +28,11 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
        return (struct ethhdr *)skb_mac_header(skb);
 }
 
+static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
+{
+       return (struct ethhdr *)skb_inner_mac_header(skb);
+}
+
 int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr);
 
 extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len);
index c06c44242f3993ae05a8454f844425a0bd14d525..30f089ebe0a4540a91053f406526f323bd1d24f0 100644 (file)
@@ -152,6 +152,8 @@ struct kernfs_syscall_ops {
        int (*rmdir)(struct kernfs_node *kn);
        int (*rename)(struct kernfs_node *kn, struct kernfs_node *new_parent,
                      const char *new_name);
+       int (*show_path)(struct seq_file *sf, struct kernfs_node *kn,
+                        struct kernfs_root *root);
 };
 
 struct kernfs_root {
index b288965e8101dc38f8a4cf4f13f55c0ba2dfbb7a..2c14eeca46f039d78c1e82f4ccd3053e4da15786 100644 (file)
@@ -173,10 +173,12 @@ enum s2mps11_regulators {
 
 #define S2MPS11_LDO_VSEL_MASK  0x3F
 #define S2MPS11_BUCK_VSEL_MASK 0xFF
+#define S2MPS11_BUCK9_VSEL_MASK        0x1F
 #define S2MPS11_ENABLE_MASK    (0x03 << S2MPS11_ENABLE_SHIFT)
 #define S2MPS11_ENABLE_SHIFT   0x06
 #define S2MPS11_LDO_N_VOLTAGES (S2MPS11_LDO_VSEL_MASK + 1)
 #define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
+#define S2MPS11_BUCK9_N_VOLTAGES (S2MPS11_BUCK9_VSEL_MASK + 1)
 #define S2MPS11_RAMP_DELAY     25000           /* uV/us */
 
 #define S2MPS11_CTRL1_PWRHOLD_MASK     BIT(4)
index 864d7221de846e44e600245eaac773ab7116bcca..8f468e0d2534eddaa178fdf252e7cf4e53f3330b 100644 (file)
@@ -500,11 +500,20 @@ static inline int page_mapcount(struct page *page)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int total_mapcount(struct page *page);
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
 #else
 static inline int total_mapcount(struct page *page)
 {
        return page_mapcount(page);
 }
+static inline int page_trans_huge_mapcount(struct page *page,
+                                          int *total_mapcount)
+{
+       int mapcount = page_mapcount(page);
+       if (total_mapcount)
+               *total_mapcount = mapcount;
+       return mapcount;
+}
 #endif
 
 static inline struct page *virt_to_head_page(const void *x)
index 77d01700daf7b826bdf19c2022d5260adb640e0b..ec5ec2818a288d52158098e829fb57d68e4ec87b 100644 (file)
@@ -79,6 +79,8 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+struct qstr;
+extern struct dentry *lookup_hash(const struct qstr *, struct dentry *);
 
 extern int follow_down_one(struct path *);
 extern int follow_down(struct path *);
index 49175e4ced11288c535eb41cf967683858d6bc3b..f840d77c6c313cdeecad084e366a762b00512319 100644 (file)
@@ -246,7 +246,15 @@ do {                                                               \
        net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
 #define net_info_ratelimited(fmt, ...)                         \
        net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define net_dbg_ratelimited(fmt, ...)                                  \
+do {                                                                   \
+       DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt);                 \
+       if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) &&        \
+           net_ratelimit())                                            \
+               __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__);    \
+} while (0)
+#elif defined(DEBUG)
 #define net_dbg_ratelimited(fmt, ...)                          \
        net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
 #else
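
The new CONFIG_DYNAMIC_DEBUG branch only formats a message when the per-callsite dynamic-debug flag is set and net_ratelimit() agrees. A rough userspace analogue of that "enabled and rate-limited" gate; the flag and the one-per-second limiter are crude stand-ins, not the kernel facilities:

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

/* Per-callsite runtime switch, standing in for dynamic-debug metadata. */
static bool dbg_enabled = true;

/* Crude limiter standing in for net_ratelimit(): at most one message
 * per second. */
static bool rate_ok(void)
{
        static time_t last;
        time_t now = time(NULL);

        if (now == last)
                return false;
        last = now;
        return true;
}

#define dbg_ratelimited(fmt, ...)                               \
do {                                                            \
        if (dbg_enabled && rate_ok())                           \
                fprintf(stderr, fmt, ##__VA_ARGS__);            \
} while (0)

int main(void)
{
        for (int i = 0; i < 5; i++)
                dbg_ratelimited("event %d\n", i); /* only the first prints */
        return 0;
}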
index 8395308a24456028f22621c5e00f757c4c817576..78181a88903b62c6b240b12da1fecad596ff5738 100644 (file)
@@ -2164,6 +2164,9 @@ struct packet_offload {
 
 struct udp_offload;
 
+/* 'skb->encapsulation' is set before gro_complete() is called.  gro_complete()
+ * must set 'skb->inner_mac_header' to the beginning of tunnel payload.
+ */
 struct udp_offload_callbacks {
        struct sk_buff          **(*gro_receive)(struct sk_buff **head,
                                                 struct sk_buff *skb,
@@ -4004,7 +4007,7 @@ netdev_features_t netif_skb_features(struct sk_buff *skb);
 
 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 {
-       netdev_features_t feature = gso_type << NETIF_F_GSO_SHIFT;
+       netdev_features_t feature = (netdev_features_t)gso_type << NETIF_F_GSO_SHIFT;
 
        /* check flags correspondence */
        BUILD_BUG_ON(SKB_GSO_TCPV4   != (NETIF_F_TSO >> NETIF_F_GSO_SHIFT));
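
The cast added to net_gso_ok() matters because gso_type is an int: shifting it left can overflow 32 bits before the value is widened to netdev_features_t, losing feature bits above bit 31. A small sketch of the difference; the flag value and shift below are hypothetical, not the real NETIF_F_* constants:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int gso_type = 1 << 10;    /* hypothetical flag */
        unsigned int shift = 25;   /* hypothetical GSO shift */

        /* Doing the shift in 32 bits first drops the intended bit 35. */
        uint64_t narrow = (uint32_t)gso_type << shift;
        /* Widening to 64 bits before the shift preserves it. */
        uint64_t wide = (uint64_t)gso_type << shift;

        printf("narrow: %#llx\nwide:   %#llx\n",
               (unsigned long long)narrow, (unsigned long long)wide);
        return 0;
}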
index 7fcb681baadf172824017d067721c65189638565..31758036787c3a490bb4aa31dda27b8a12be48da 100644 (file)
@@ -133,7 +133,7 @@ void of_core_init(void);
 
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
-       return fwnode && fwnode->type == FWNODE_OF;
+       return !IS_ERR_OR_NULL(fwnode) && fwnode->type == FWNODE_OF;
 }
 
 static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
index f4ed4f1b0c77ac84f65a4586502863fd78448063..6b052aa7b5b79de6712ee36fe3a424f6f93bd548 100644 (file)
@@ -516,6 +516,27 @@ static inline int PageTransCompound(struct page *page)
        return PageCompound(page);
 }
 
+/*
+ * PageTransCompoundMap is the same as PageTransCompound, but it also
+ * guarantees the primary MMU has the entire compound page mapped
+ * through pmd_trans_huge, which in turn guarantees the secondary MMUs
+ * can also map the entire compound page. This allows the secondary
+ * MMUs to call get_user_pages() only once for each compound page and
+ * to immediately map the entire compound page with a single secondary
+ * MMU fault. If a pmd split happens later, the secondary MMUs will be
+ * updated through the MMU notifier invalidation issued by
+ * split_huge_pmd().
+ *
+ * Unlike PageTransCompound, this is safe to call only while
+ * split_huge_pmd() cannot run from under us, for example when protected
+ * by the MMU notifier; otherwise the page->_mapcount < 0 check may yield
+ * false positives.
+ */
+static inline int PageTransCompoundMap(struct page *page)
+{
+       return PageTransCompound(page) && atomic_read(&page->_mapcount) < 0;
+}
+
 /*
  * PageTransTail returns true for both transparent huge pages
  * and hugetlbfs pages, so it should only be called when it's known
@@ -559,6 +580,7 @@ static inline int TestClearPageDoubleMap(struct page *page)
 #else
 TESTPAGEFLAG_FALSE(TransHuge)
 TESTPAGEFLAG_FALSE(TransCompound)
+TESTPAGEFLAG_FALSE(TransCompoundMap)
 TESTPAGEFLAG_FALSE(TransTail)
 TESTPAGEFLAG_FALSE(DoubleMap)
        TESTSETFLAG_FALSE(DoubleMap)
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
deleted file mode 100644 (file)
index 2122133..0000000
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * FLoating proportions
- *
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
- *
- * This file contains the public data structure and API definitions.
- */
-
-#ifndef _LINUX_PROPORTIONS_H
-#define _LINUX_PROPORTIONS_H
-
-#include <linux/percpu_counter.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/gfp.h>
-
-struct prop_global {
-       /*
-        * The period over which we differentiate
-        *
-        *   period = 2^shift
-        */
-       int shift;
-       /*
-        * The total event counter aka 'time'.
-        *
-        * Treated as an unsigned long; the lower 'shift - 1' bits are the
-        * counter bits, the remaining upper bits the period counter.
-        */
-       struct percpu_counter events;
-};
-
-/*
- * global proportion descriptor
- *
- * this is needed to consistently flip prop_global structures.
- */
-struct prop_descriptor {
-       int index;
-       struct prop_global pg[2];
-       struct mutex mutex;             /* serialize the prop_global switch */
-};
-
-int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
-void prop_change_shift(struct prop_descriptor *pd, int new_shift);
-
-/*
- * ----- PERCPU ------
- */
-
-struct prop_local_percpu {
-       /*
-        * the local events counter
-        */
-       struct percpu_counter events;
-
-       /*
-        * snapshot of the last seen global state
-        */
-       int shift;
-       unsigned long period;
-       raw_spinlock_t lock;            /* protect the snapshot state */
-};
-
-int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
-void prop_local_destroy_percpu(struct prop_local_percpu *pl);
-void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
-void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
-               long *numerator, long *denominator);
-
-static inline
-void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __prop_inc_percpu(pd, pl);
-       local_irq_restore(flags);
-}
-
-/*
- * Limit the time part in order to ensure there are some bits left for the
- * cycle counter and fraction multiply.
- */
-#if BITS_PER_LONG == 32
-#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
-#else
-#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
-#endif
-
-#define PROP_FRAC_SHIFT                (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
-#define PROP_FRAC_BASE         (1UL << PROP_FRAC_SHIFT)
-
-void __prop_inc_percpu_max(struct prop_descriptor *pd,
-                          struct prop_local_percpu *pl, long frac);
-
-
-/*
- * ----- SINGLE ------
- */
-
-struct prop_local_single {
-       /*
-        * the local events counter
-        */
-       unsigned long events;
-
-       /*
-        * snapshot of the last seen global state
-        * and a lock protecting this state
-        */
-       unsigned long period;
-       int shift;
-       raw_spinlock_t lock;            /* protect the snapshot state */
-};
-
-#define INIT_PROP_LOCAL_SINGLE(name)                   \
-{      .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),    \
-}
-
-int prop_local_init_single(struct prop_local_single *pl);
-void prop_local_destroy_single(struct prop_local_single *pl);
-void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
-void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
-               long *numerator, long *denominator);
-
-static inline
-void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __prop_inc_single(pd, pl);
-       local_irq_restore(flags);
-}
-
-#endif /* _LINUX_PROPORTIONS_H */
index 2657aff2725b4fdc8243e7beb836747bbd5817d6..5f1533e3d03206d2d64f67a6a1db51b785f0a1fb 100644 (file)
@@ -508,14 +508,7 @@ int rcu_read_lock_bh_held(void);
  * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
  * critical section unless it can prove otherwise.
  */
-#ifdef CONFIG_PREEMPT_COUNT
 int rcu_read_lock_sched_held(void);
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
-       return 1;
-}
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
 
 #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
@@ -532,18 +525,10 @@ static inline int rcu_read_lock_bh_held(void)
        return 1;
 }
 
-#ifdef CONFIG_PREEMPT_COUNT
 static inline int rcu_read_lock_sched_held(void)
 {
-       return preempt_count() != 0 || irqs_disabled();
-}
-#else /* #ifdef CONFIG_PREEMPT_COUNT */
-static inline int rcu_read_lock_sched_held(void)
-{
-       return 1;
+       return !preemptible();
 }
-#endif /* #else #ifdef CONFIG_PREEMPT_COUNT */
-
 #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
 #ifdef CONFIG_PROVE_RCU
@@ -1144,4 +1129,17 @@ static inline void rcu_sysidle_force_exit(void)
 #endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 
 
+/*
+ * Dump the ftrace buffer, but only one time per callsite per boot.
+ */
+#define rcu_ftrace_dump(oops_dump_mode) \
+do { \
+       static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
+       \
+       if (!atomic_read(&___rfd_beenhere) && \
+           !atomic_xchg(&___rfd_beenhere, 1)) \
+               ftrace_dump(oops_dump_mode); \
+} while (0)
+
+
 #endif /* __LINUX_RCUPDATE_H */
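
The rcu_ftrace_dump() macro added above relies on a per-callsite static atomic_t plus atomic_xchg() so that each call site dumps the ftrace buffer at most once per boot. The same "run this once" idiom works outside RCU as well; here is a minimal user-space sketch using C11 atomics (do_once() is a hypothetical name, not part of this patch):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Run stmt at most once per call site for the lifetime of the process:
     * cheap load first, then a single winner is picked via exchange. */
    #define do_once(stmt)                                   \
    do {                                                    \
            static atomic_int __beenhere;                   \
            if (!atomic_load(&__beenhere) &&                \
                !atomic_exchange(&__beenhere, 1))           \
                    stmt;                                   \
    } while (0)

    int main(void)
    {
            for (int i = 0; i < 3; i++)
                    do_once(printf("dumped once, i=%d\n", i));
            return 0;       /* prints exactly one line */
    }
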
index 64809aea661cee43646c4803247604cc40f16d6c..93aea75029fbd7795e99ffcf77d781ed122f1cba 100644 (file)
@@ -149,6 +149,22 @@ static inline unsigned long rcu_batches_completed_sched(void)
        return 0;
 }
 
+/*
+ * Return the number of expedited grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed(void)
+{
+       return 0;
+}
+
+/*
+ * Return the number of expedited sched grace periods completed.
+ */
+static inline unsigned long rcu_exp_batches_completed_sched(void)
+{
+       return 0;
+}
+
 static inline void rcu_force_quiescent_state(void)
 {
 }
index ad1eda9fa4daea077998d253dc604c18c3d444c6..5043cb823fb273b48a24c5366e800ee704000355 100644 (file)
@@ -87,6 +87,8 @@ unsigned long rcu_batches_started_sched(void);
 unsigned long rcu_batches_completed(void);
 unsigned long rcu_batches_completed_bh(void);
 unsigned long rcu_batches_completed_sched(void);
+unsigned long rcu_exp_batches_completed(void);
+unsigned long rcu_exp_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
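
The two rcu_exp_batches_completed*() declarations above (together with the always-zero TINY_RCU stubs in the previous hunk) expose the expedited grace-period counters to callers such as the new rcuperf module later in this merge. A hedged kernel-style sketch of observing the counter around a wait, assuming CONFIG_TREE_RCU so it actually advances:

    #include <linux/rcupdate.h>
    #include <linux/printk.h>

    /* Sketch only: snapshot the expedited-GP counter across a wait. */
    static void show_exp_progress(void)
    {
            unsigned long snap = rcu_exp_batches_completed();

            synchronize_rcu_expedited();    /* forces at least one expedited GP */
            pr_info("rcu_exp_batches_completed: %lu -> %lu\n",
                    snap, rcu_exp_batches_completed());
    }
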
diff --git a/include/linux/rio_mport_cdev.h b/include/linux/rio_mport_cdev.h
deleted file mode 100644 (file)
index b65d19d..0000000
+++ /dev/null
@@ -1,271 +0,0 @@
-/*
- * Copyright (c) 2015-2016, Integrated Device Technology Inc.
- * Copyright (c) 2015, Prodrive Technologies
- * Copyright (c) 2015, Texas Instruments Incorporated
- * Copyright (c) 2015, RapidIO Trade Association
- * All rights reserved.
- *
- * This software is available to you under a choice of one of two licenses.
- * You may choose to be licensed under the terms of the GNU General Public
- * License(GPL) Version 2, or the BSD-3 Clause license below:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * 3. Neither the name of the copyright holder nor the names of its contributors
- * may be used to endorse or promote products derived from this software without
- * specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
- * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
- * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
- * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _RIO_MPORT_CDEV_H_
-#define _RIO_MPORT_CDEV_H_
-
-#ifndef __user
-#define __user
-#endif
-
-struct rio_mport_maint_io {
-       uint32_t rioid;         /* destID of remote device */
-       uint32_t hopcount;      /* hopcount to remote device */
-       uint32_t offset;        /* offset in register space */
-       size_t length;          /* length in bytes */
-       void __user *buffer;    /* data buffer */
-};
-
-/*
- * Definitions for RapidIO data transfers:
- * - memory mapped (MAPPED)
- * - packet generation from memory (TRANSFER)
- */
-#define RIO_TRANSFER_MODE_MAPPED       (1 << 0)
-#define RIO_TRANSFER_MODE_TRANSFER     (1 << 1)
-#define RIO_CAP_DBL_SEND               (1 << 2)
-#define RIO_CAP_DBL_RECV               (1 << 3)
-#define RIO_CAP_PW_SEND                        (1 << 4)
-#define RIO_CAP_PW_RECV                        (1 << 5)
-#define RIO_CAP_MAP_OUTB               (1 << 6)
-#define RIO_CAP_MAP_INB                        (1 << 7)
-
-struct rio_mport_properties {
-       uint16_t hdid;
-       uint8_t id;                     /* Physical port ID */
-       uint8_t  index;
-       uint32_t flags;
-       uint32_t sys_size;              /* Default addressing size */
-       uint8_t  port_ok;
-       uint8_t  link_speed;
-       uint8_t  link_width;
-       uint32_t dma_max_sge;
-       uint32_t dma_max_size;
-       uint32_t dma_align;
-       uint32_t transfer_mode;         /* Default transfer mode */
-       uint32_t cap_sys_size;          /* Capable system sizes */
-       uint32_t cap_addr_size;         /* Capable addressing sizes */
-       uint32_t cap_transfer_mode;     /* Capable transfer modes */
-       uint32_t cap_mport;             /* Mport capabilities */
-};
-
-/*
- * Definitions for RapidIO events;
- * - incoming port-writes
- * - incoming doorbells
- */
-#define RIO_DOORBELL   (1 << 0)
-#define RIO_PORTWRITE  (1 << 1)
-
-struct rio_doorbell {
-       uint32_t rioid;
-       uint16_t payload;
-};
-
-struct rio_doorbell_filter {
-       uint32_t rioid;                 /* 0xffffffff to match all ids */
-       uint16_t low;
-       uint16_t high;
-};
-
-
-struct rio_portwrite {
-       uint32_t payload[16];
-};
-
-struct rio_pw_filter {
-       uint32_t mask;
-       uint32_t low;
-       uint32_t high;
-};
-
-/* RapidIO base address for inbound requests set to value defined below
- * indicates that no specific RIO-to-local address translation is requested
- * and driver should use direct (one-to-one) address mapping.
-*/
-#define RIO_MAP_ANY_ADDR       (uint64_t)(~((uint64_t) 0))
-
-struct rio_mmap {
-       uint32_t rioid;
-       uint64_t rio_addr;
-       uint64_t length;
-       uint64_t handle;
-       void *address;
-};
-
-struct rio_dma_mem {
-       uint64_t length;                /* length of DMA memory */
-       uint64_t dma_handle;            /* handle associated with this memory */
-       void *buffer;                   /* pointer to this memory */
-};
-
-
-struct rio_event {
-       unsigned int header;    /* event type RIO_DOORBELL or RIO_PORTWRITE */
-       union {
-               struct rio_doorbell doorbell;   /* header for RIO_DOORBELL */
-               struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
-       } u;
-};
-
-enum rio_transfer_sync {
-       RIO_TRANSFER_SYNC,      /* synchronous transfer */
-       RIO_TRANSFER_ASYNC,     /* asynchronous transfer */
-       RIO_TRANSFER_FAF,       /* fire-and-forget transfer */
-};
-
-enum rio_transfer_dir {
-       RIO_TRANSFER_DIR_READ,  /* Read operation */
-       RIO_TRANSFER_DIR_WRITE, /* Write operation */
-};
-
-/*
- * RapidIO data exchange transactions are lists of individual transfers. Each
- * transfer exchanges data between two RapidIO devices by remote direct memory
- * access and has its own completion code.
- *
- * The RapidIO specification defines four types of data exchange requests:
- * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
- * to specify the required type of write operation or combination of them when
- * only the last data packet requires response.
- *
- * NREAD:    read up to 256 bytes from remote device memory into local memory
- * NWRITE:   write up to 256 bytes from local memory to remote device memory
- *           without confirmation
- * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
- * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
- *
- * The default exchange is chosen from NREAD and any of the WRITE modes as the
- * driver sees fit. For write requests the user can explicitly choose between
- * any of the write modes for each transaction.
- */
-enum rio_exchange {
-       RIO_EXCHANGE_DEFAULT,   /* Default method */
-       RIO_EXCHANGE_NWRITE,    /* All packets using NWRITE */
-       RIO_EXCHANGE_SWRITE,    /* All packets using SWRITE */
-       RIO_EXCHANGE_NWRITE_R,  /* Last packet NWRITE_R, others NWRITE */
-       RIO_EXCHANGE_SWRITE_R,  /* Last packet NWRITE_R, others SWRITE */
-       RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
-};
-
-struct rio_transfer_io {
-       uint32_t rioid;                 /* Target destID */
-       uint64_t rio_addr;              /* Address in target's RIO mem space */
-       enum rio_exchange method;       /* Data exchange method */
-       void __user *loc_addr;
-       uint64_t handle;
-       uint64_t offset;                /* Offset in buffer */
-       uint64_t length;                /* Length in bytes */
-       uint32_t completion_code;       /* Completion code for this transfer */
-};
-
-struct rio_transaction {
-       uint32_t transfer_mode;         /* Data transfer mode */
-       enum rio_transfer_sync sync;    /* Synchronization method */
-       enum rio_transfer_dir dir;      /* Transfer direction */
-       size_t count;                   /* Number of transfers */
-       struct rio_transfer_io __user *block;   /* Array of <count> transfers */
-};
-
-struct rio_async_tx_wait {
-       uint32_t token;         /* DMA transaction ID token */
-       uint32_t timeout;       /* Wait timeout in msec, if 0 use default TO */
-};
-
-#define RIO_MAX_DEVNAME_SZ     20
-
-struct rio_rdev_info {
-       uint32_t destid;
-       uint8_t hopcount;
-       uint32_t comptag;
-       char name[RIO_MAX_DEVNAME_SZ + 1];
-};
-
-/* Driver IOCTL codes */
-#define RIO_MPORT_DRV_MAGIC           'm'
-
-#define RIO_MPORT_MAINT_HDID_SET       \
-       _IOW(RIO_MPORT_DRV_MAGIC, 1, uint16_t)
-#define RIO_MPORT_MAINT_COMPTAG_SET    \
-       _IOW(RIO_MPORT_DRV_MAGIC, 2, uint32_t)
-#define RIO_MPORT_MAINT_PORT_IDX_GET   \
-       _IOR(RIO_MPORT_DRV_MAGIC, 3, uint32_t)
-#define RIO_MPORT_GET_PROPERTIES \
-       _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
-#define RIO_MPORT_MAINT_READ_LOCAL \
-       _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_LOCAL \
-       _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_READ_REMOTE \
-       _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
-#define RIO_MPORT_MAINT_WRITE_REMOTE \
-       _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
-#define RIO_ENABLE_DOORBELL_RANGE      \
-       _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
-#define RIO_DISABLE_DOORBELL_RANGE     \
-       _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
-#define RIO_ENABLE_PORTWRITE_RANGE     \
-       _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
-#define RIO_DISABLE_PORTWRITE_RANGE    \
-       _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
-#define RIO_SET_EVENT_MASK             \
-       _IOW(RIO_MPORT_DRV_MAGIC, 13, unsigned int)
-#define RIO_GET_EVENT_MASK             \
-       _IOR(RIO_MPORT_DRV_MAGIC, 14, unsigned int)
-#define RIO_MAP_OUTBOUND \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
-#define RIO_UNMAP_OUTBOUND \
-       _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
-#define RIO_MAP_INBOUND \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
-#define RIO_UNMAP_INBOUND \
-       _IOW(RIO_MPORT_DRV_MAGIC, 18, uint64_t)
-#define RIO_ALLOC_DMA \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
-#define RIO_FREE_DMA \
-       _IOW(RIO_MPORT_DRV_MAGIC, 20, uint64_t)
-#define RIO_TRANSFER \
-       _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
-#define RIO_WAIT_FOR_ASYNC \
-       _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
-#define RIO_DEV_ADD \
-       _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
-#define RIO_DEV_DEL \
-       _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
-
-#endif /* _RIO_MPORT_CDEV_H_ */
index 52c4847b05e2882a72d04c3c75fc4d55c2b4a6b9..e8dfa6f0d843ee6695a6bfef45a6eb54ed7a7295 100644 (file)
@@ -40,7 +40,6 @@ struct sched_param {
 #include <linux/pid.h>
 #include <linux/percpu.h>
 #include <linux/topology.h>
-#include <linux/proportions.h>
 #include <linux/seccomp.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist.h>
@@ -1596,6 +1595,7 @@ struct task_struct {
 
        unsigned long sas_ss_sp;
        size_t sas_ss_size;
+       unsigned sas_ss_flags;
 
        struct callback_head *task_works;
 
@@ -2575,6 +2575,18 @@ static inline int kill_cad_pid(int sig, int priv)
  */
 static inline int on_sig_stack(unsigned long sp)
 {
+       /*
+        * If the signal stack is SS_AUTODISARM then, by construction, we
+        * can't be on the signal stack unless user code deliberately set
+        * SS_AUTODISARM when we were already on it.
+        *
+        * This improves reliability: if user state gets corrupted such that
+        * the stack pointer points very close to the end of the signal stack,
+        * then this check will enable the signal to be handled anyway.
+        */
+       if (current->sas_ss_flags & SS_AUTODISARM)
+               return 0;
+
 #ifdef CONFIG_STACK_GROWSUP
        return sp >= current->sas_ss_sp &&
                sp - current->sas_ss_sp < current->sas_ss_size;
@@ -2592,6 +2604,13 @@ static inline int sas_ss_flags(unsigned long sp)
        return on_sig_stack(sp) ? SS_ONSTACK : 0;
 }
 
+static inline void sas_ss_reset(struct task_struct *p)
+{
+       p->sas_ss_sp = 0;
+       p->sas_ss_size = 0;
+       p->sas_ss_flags = SS_DISABLE;
+}
+
 static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
 {
        if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
index 92557bbce7e7b9a3d9c6c6ed4c4d4f54ff7ff9e5..3fbe81444d31e85db8b1bb0bb517a7d5ae4b5860 100644 (file)
@@ -432,8 +432,10 @@ int __save_altstack(stack_t __user *, unsigned long);
        stack_t __user *__uss = uss; \
        struct task_struct *t = current; \
        put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
-       put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+       put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
        put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+       if (t->sas_ss_flags & SS_AUTODISARM) \
+               sas_ss_reset(t); \
 } while (0);
 
 #ifdef CONFIG_PROC_FS
index 2b83359c19cabee6dc211c887518d001f05081f4..ad220359f1b072cfbf29456871312bbf07d5cceb 100644 (file)
@@ -418,7 +418,7 @@ extern sector_t swapdev_block(int, pgoff_t);
 extern int page_swapcount(struct page *);
 extern int swp_swapcount(swp_entry_t entry);
 extern struct swap_info_struct *page_swap_info(struct page *);
-extern int reuse_swap_page(struct page *);
+extern bool reuse_swap_page(struct page *, int *);
 extern int try_to_free_swap(struct page *);
 struct backing_dev_info;
 
@@ -513,8 +513,8 @@ static inline int swp_swapcount(swp_entry_t entry)
        return 0;
 }
 
-#define reuse_swap_page(page) \
-       (!PageTransCompound(page) && page_mapcount(page) == 1)
+#define reuse_swap_page(page, total_mapcount) \
+       (page_trans_huge_mapcount(page, total_mapcount) == 1)
 
 static inline int try_to_free_swap(struct page *page)
 {
@@ -533,6 +533,10 @@ static inline swp_entry_t get_swap_page(void)
 #ifdef CONFIG_MEMCG
 static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
 {
+       /* Cgroup2 doesn't have per-cgroup swappiness */
+       if (cgroup_subsys_on_dfl(memory_cgrp_subsys))
+               return vm_swappiness;
+
        /* root ? */
        if (mem_cgroup_disabled() || !memcg->css.parent)
                return vm_swappiness;
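
Two API notes on the swap.h hunks above: reuse_swap_page() now returns bool and reports the page's total mapcount through its second argument (covering THP tail mappings), and cgroup2 hierarchies fall back to the global vm_swappiness because per-cgroup swappiness is a v1-only knob. A hedged sketch of the new reuse_swap_page() calling convention:

    #include <linux/mm.h>
    #include <linux/swap.h>

    /* Sketch: callers now pass somewhere to store the total mapcount. */
    static bool page_exclusively_ours(struct page *page)
    {
            int total_mapcount;

            /* roughly: true when we are the only mapper of the page */
            return reuse_swap_page(page, &total_mapcount);
    }
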
index fd9bcfedad42d70c4525fd30178756fb43ccf581..1b5d1cd796e2b753bdd1ff586427a35c033a3ae9 100644 (file)
@@ -87,6 +87,7 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
 void iov_iter_init(struct iov_iter *i, int direction, const struct iovec *iov,
                        unsigned long nr_segs, size_t count);
 void iov_iter_kvec(struct iov_iter *i, int direction, const struct kvec *kvec,
index 730d82ad6ee521beddc3af344d8e99a339f74dd5..24cd3949a9a4f04f78ff31ce681d71833f1bbe54 100644 (file)
@@ -80,6 +80,7 @@ struct netns_xfrm {
        struct flow_cache       flow_cache_global;
        atomic_t                flow_cache_genid;
        struct list_head        flow_cache_gc_list;
+       atomic_t                flow_cache_gc_count;
        spinlock_t              flow_cache_gc_lock;
        struct work_struct      flow_cache_gc_work;
        struct work_struct      flow_cache_flush_work;
index b83114077cee33d184a2bef45da1be686f94a72f..a1140249ec25dfad777f12664de3845574814f8e 100644 (file)
@@ -106,15 +106,6 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
        return iptunnel_handle_offloads(skb, type);
 }
 
-static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
-{
-       struct udphdr *uh;
-
-       uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
-       skb_shinfo(skb)->gso_type |= uh->check ?
-                               SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
-}
-
 static inline void udp_tunnel_encap_enable(struct socket *sock)
 {
 #if IS_ENABLED(CONFIG_IPV6)
index 73ed2e951c020d28c21457d907c55764ea7b3d88..35437c779da8d6d6d113ed0deedbd39428852238 100644 (file)
@@ -252,7 +252,9 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
            (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
             skb->inner_protocol != htons(ETH_P_TEB) ||
             (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-             sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
+             sizeof(struct udphdr) + sizeof(struct vxlanhdr)) ||
+            (skb->ip_summed != CHECKSUM_NONE &&
+             !can_checksum_protocol(features, inner_eth_hdr(skb)->h_proto))))
                return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
 
        return features;
index ef72c4aada566bb853a09c6c4df95661f6233153..d3e756539d44c6c94941ee457914785c0566a8e4 100644 (file)
@@ -171,6 +171,77 @@ TRACE_EVENT(rcu_grace_period_init,
                  __entry->grplo, __entry->grphi, __entry->qsmask)
 );
 
+/*
+ * Tracepoint for expedited grace-period events.  Takes a string identifying
+ * the RCU flavor, the expedited grace-period sequence number, and a string
+ * identifying the grace-period-related event as follows:
+ *
+ *     "snap": Captured snapshot of expedited grace period sequence number.
+ *     "start": Started a real expedited grace period.
+ *     "end": Ended a real expedited grace period.
+ *     "endwake": Woke piggybackers up.
+ *     "done": Someone else did the expedited grace period for us.
+ */
+TRACE_EVENT(rcu_exp_grace_period,
+
+       TP_PROTO(const char *rcuname, unsigned long gpseq, const char *gpevent),
+
+       TP_ARGS(rcuname, gpseq, gpevent),
+
+       TP_STRUCT__entry(
+               __field(const char *, rcuname)
+               __field(unsigned long, gpseq)
+               __field(const char *, gpevent)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->gpseq = gpseq;
+               __entry->gpevent = gpevent;
+       ),
+
+       TP_printk("%s %lu %s",
+                 __entry->rcuname, __entry->gpseq, __entry->gpevent)
+);
+
+/*
+ * Tracepoint for expedited grace-period funnel-locking events.  Takes a
+ * string identifying the RCU flavor, an integer identifying the rcu_node
+ * combining-tree level, another pair of integers identifying the lowest-
+ * and highest-numbered CPU associated with the current rcu_node structure,
+ * and a string identifying the grace-period-related event as follows:
+ *
+ *	"nxtlvl": Advance to next level of rcu_node funnel.
+ *	"wait": Wait for someone else to do expedited GP.
+ */
+TRACE_EVENT(rcu_exp_funnel_lock,
+
+       TP_PROTO(const char *rcuname, u8 level, int grplo, int grphi,
+                const char *gpevent),
+
+       TP_ARGS(rcuname, level, grplo, grphi, gpevent),
+
+       TP_STRUCT__entry(
+               __field(const char *, rcuname)
+               __field(u8, level)
+               __field(int, grplo)
+               __field(int, grphi)
+               __field(const char *, gpevent)
+       ),
+
+       TP_fast_assign(
+               __entry->rcuname = rcuname;
+               __entry->level = level;
+               __entry->grplo = grplo;
+               __entry->grphi = grphi;
+               __entry->gpevent = gpevent;
+       ),
+
+       TP_printk("%s %d %d %d %s",
+                 __entry->rcuname, __entry->level, __entry->grplo,
+                 __entry->grphi, __entry->gpevent)
+);
+
 /*
  * Tracepoint for RCU no-CBs CPU callback handoffs.  This event is intended
  * to assist debugging of these handoffs.
@@ -704,11 +775,15 @@ TRACE_EVENT(rcu_barrier,
 #else /* #ifdef CONFIG_RCU_TRACE */
 
 #define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
-                                   qsmask) do { } while (0)
 #define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
                                      level, grplo, grphi, event) \
                                      do { } while (0)
+#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+                                   qsmask) do { } while (0)
+#define trace_rcu_exp_grace_period(rcuname, gpseq, gpevent) \
+       do { } while (0)
+#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
+       do { } while (0)
 #define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
 #define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
 #define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
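
The two new TRACE_EVENTs above are emitted from the RCU core like any other tracepoint. A hedged sketch of a call site inside kernel/rcu, where this trace header is included (TPS() is the RCU core's tracepoint_string() shorthand, and the wrapper function itself is illustrative, not part of this patch):

    /* Sketch: how RCU core code emits the new expedited-GP tracepoint. */
    static void note_exp_gp_start(const char *rcuname, unsigned long s)
    {
            trace_rcu_exp_grace_period(rcuname, s, TPS("start"));
    }
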
index 6e0f5f01734cb893bfec236bd39bb012d333e510..c51afb71bfabc658b16be2773ad5a0e78ca1d09b 100644 (file)
@@ -718,9 +718,9 @@ __SYSCALL(__NR_mlock2, sys_mlock2)
 #define __NR_copy_file_range 285
 __SYSCALL(__NR_copy_file_range, sys_copy_file_range)
 #define __NR_preadv2 286
-__SYSCALL(__NR_preadv2, sys_preadv2)
+__SC_COMP(__NR_preadv2, sys_preadv2, compat_sys_preadv2)
 #define __NR_pwritev2 287
-__SYSCALL(__NR_pwritev2, sys_pwritev2)
+__SC_COMP(__NR_pwritev2, sys_pwritev2, compat_sys_pwritev2)
 
 #undef __NR_syscalls
 #define __NR_syscalls 288
index f80277569f24d44380ce0eed7dc47ce43ad863e3..e601c8c3bdc777b6458fe99d39de1f91b16f66d9 100644 (file)
 #ifndef _LINUX_IF_H
 #define _LINUX_IF_H
 
+#include <linux/libc-compat.h>          /* for compatibility with glibc */
 #include <linux/types.h>               /* for "__kernel_caddr_t" et al */
 #include <linux/socket.h>              /* for "struct sockaddr" et al  */
 #include <linux/compiler.h>            /* for "__user" et al           */
 
+#if __UAPI_DEF_IF_IFNAMSIZ
 #define        IFNAMSIZ        16
+#endif /* __UAPI_DEF_IF_IFNAMSIZ */
 #define        IFALIASZ        256
 #include <linux/hdlc/ioctl.h>
 
+/* For glibc compatibility. An empty enum does not compile. */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && \
+    __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0
 /**
  * enum net_device_flags - &struct net_device flags
  *
@@ -68,6 +74,8 @@
  * @IFF_ECHO: echo sent packets. Volatile.
  */
 enum net_device_flags {
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
        IFF_UP                          = 1<<0,  /* sysfs */
        IFF_BROADCAST                   = 1<<1,  /* volatile */
        IFF_DEBUG                       = 1<<2,  /* sysfs */
@@ -84,11 +92,17 @@ enum net_device_flags {
        IFF_PORTSEL                     = 1<<13, /* sysfs */
        IFF_AUTOMEDIA                   = 1<<14, /* sysfs */
        IFF_DYNAMIC                     = 1<<15, /* sysfs */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
        IFF_LOWER_UP                    = 1<<16, /* volatile */
        IFF_DORMANT                     = 1<<17, /* volatile */
        IFF_ECHO                        = 1<<18, /* volatile */
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 };
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO != 0 && __UAPI_DEF_IF_NET_DEVICE_FLAGS != 0 */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS
 #define IFF_UP                         IFF_UP
 #define IFF_BROADCAST                  IFF_BROADCAST
 #define IFF_DEBUG                      IFF_DEBUG
@@ -105,9 +119,13 @@ enum net_device_flags {
 #define IFF_PORTSEL                    IFF_PORTSEL
 #define IFF_AUTOMEDIA                  IFF_AUTOMEDIA
 #define IFF_DYNAMIC                    IFF_DYNAMIC
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS */
+
+#if __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define IFF_LOWER_UP                   IFF_LOWER_UP
 #define IFF_DORMANT                    IFF_DORMANT
 #define IFF_ECHO                       IFF_ECHO
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
 
 #define IFF_VOLATILE   (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
                IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
@@ -166,6 +184,8 @@ enum {
  *     being very small might be worth keeping for clean configuration.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFMAP
 struct ifmap {
        unsigned long mem_start;
        unsigned long mem_end;
@@ -175,6 +195,7 @@ struct ifmap {
        unsigned char port;
        /* 3 bytes spare */
 };
+#endif /* __UAPI_DEF_IF_IFMAP */
 
 struct if_settings {
        unsigned int type;      /* Type of physical device or protocol */
@@ -200,6 +221,8 @@ struct if_settings {
  * remainder may be interface specific.
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFREQ
 struct ifreq {
 #define IFHWADDRLEN    6
        union
@@ -223,6 +246,7 @@ struct ifreq {
                struct  if_settings ifru_settings;
        } ifr_ifru;
 };
+#endif /* __UAPI_DEF_IF_IFREQ */
 
 #define ifr_name       ifr_ifrn.ifrn_name      /* interface name       */
 #define ifr_hwaddr     ifr_ifru.ifru_hwaddr    /* MAC address          */
@@ -249,6 +273,8 @@ struct ifreq {
  * must know all networks accessible).
  */
 
+/* for compatibility with glibc net/if.h */
+#if __UAPI_DEF_IF_IFCONF
 struct ifconf  {
        int     ifc_len;                        /* size of buffer       */
        union {
@@ -256,6 +282,8 @@ struct ifconf  {
                struct ifreq __user *ifcu_req;
        } ifc_ifcu;
 };
+#endif /* __UAPI_DEF_IF_IFCONF */
+
 #define        ifc_buf ifc_ifcu.ifcu_buf               /* buffer address       */
 #define        ifc_req ifc_ifcu.ifcu_req               /* array of structures  */
 
index 4c58d9917aa4ef75f0e0a8f1531864f66ea682bb..3411ed06b9c0b80f81e3443772f1d2059b094fd1 100644 (file)
@@ -19,6 +19,8 @@
 
 #define MACSEC_MAX_KEY_LEN 128
 
+#define MACSEC_KEYID_LEN 16
+
 #define MACSEC_DEFAULT_CIPHER_ID   0x0080020001000001ULL
 #define MACSEC_DEFAULT_CIPHER_ALT  0x0080C20001000001ULL
 
@@ -77,7 +79,7 @@ enum macsec_sa_attrs {
        MACSEC_SA_ATTR_ACTIVE, /* config/dump, u8 0..1 */
        MACSEC_SA_ATTR_PN,     /* config/dump, u32 */
        MACSEC_SA_ATTR_KEY,    /* config, data */
-       MACSEC_SA_ATTR_KEYID,  /* config/dump, u64 */
+       MACSEC_SA_ATTR_KEYID,  /* config/dump, 128-bit */
        MACSEC_SA_ATTR_STATS,  /* dump, nested, macsec_sa_stats_attr */
        __MACSEC_SA_ATTR_END,
        NUM_MACSEC_SA_ATTR = __MACSEC_SA_ATTR_END,
index 7d024ceb075d8d4cd657c1c25db37a748940fc89..d5e38c73377c05fa44607bffe8bfb01112330706 100644 (file)
 /* We have included glibc headers... */
 #if defined(__GLIBC__)
 
+/* Coordinate with glibc net/if.h header. */
+#if defined(_NET_IF_H)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+
+#define __UAPI_DEF_IF_IFCONF 0
+#define __UAPI_DEF_IF_IFMAP 0
+#define __UAPI_DEF_IF_IFNAMSIZ 0
+#define __UAPI_DEF_IF_IFREQ 0
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+#endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+
+#else /* _NET_IF_H */
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
+#endif /* _NET_IF_H */
+
 /* Coordinate with glibc netinet/in.h header. */
 #if defined(_NETINET_IN_H)
 
  * that we need. */
 #else /* !defined(__GLIBC__) */
 
+/* Definitions for if.h */
+#define __UAPI_DEF_IF_IFCONF 1
+#define __UAPI_DEF_IF_IFMAP 1
+#define __UAPI_DEF_IF_IFNAMSIZ 1
+#define __UAPI_DEF_IF_IFREQ 1
+/* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS 1
+/* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
+
 /* Definitions for in.h */
 #define __UAPI_DEF_IN_ADDR             1
 #define __UAPI_DEF_IN_IPPROTO          1
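
The point of the new __UAPI_DEF_IF_* guards is that a user-space translation unit can include both the glibc and the kernel network-interface headers without redefinition errors: when glibc's net/if.h comes first, _NET_IF_H is already defined and linux/if.h suppresses its duplicate definitions while still providing the flags glibc lacks. A hedged sketch of the case being coordinated, assuming sanitized kernel headers with this patch are installed:

    /* glibc header first; linux/if.h then only adds what glibc is missing. */
    #include <net/if.h>         /* glibc: struct ifreq, IFNAMSIZ, ... */
    #include <linux/if.h>       /* kernel UAPI: still supplies IFF_LOWER_UP etc. */
    #include <stdio.h>

    int main(void)
    {
            printf("IFNAMSIZ=%d IFF_LOWER_UP=%#x\n", IFNAMSIZ, IFF_LOWER_UP);
            return 0;
    }
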
diff --git a/include/uapi/linux/rio_mport_cdev.h b/include/uapi/linux/rio_mport_cdev.h
new file mode 100644 (file)
index 0000000..5796bf1
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Integrated Device Technology Inc.
+ * Copyright (c) 2015, Prodrive Technologies
+ * Copyright (c) 2015, Texas Instruments Incorporated
+ * Copyright (c) 2015, RapidIO Trade Association
+ * All rights reserved.
+ *
+ * This software is available to you under a choice of one of two licenses.
+ * You may choose to be licensed under the terms of the GNU General Public
+ * License(GPL) Version 2, or the BSD-3 Clause license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its contributors
+ * may be used to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RIO_MPORT_CDEV_H_
+#define _RIO_MPORT_CDEV_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct rio_mport_maint_io {
+       __u16 rioid;            /* destID of remote device */
+       __u8  hopcount;         /* hopcount to remote device */
+       __u8  pad0[5];
+       __u32 offset;           /* offset in register space */
+       __u32 length;           /* length in bytes */
+       __u64 buffer;           /* pointer to data buffer */
+};
+
+/*
+ * Definitions for RapidIO data transfers:
+ * - memory mapped (MAPPED)
+ * - packet generation from memory (TRANSFER)
+ */
+#define RIO_TRANSFER_MODE_MAPPED       (1 << 0)
+#define RIO_TRANSFER_MODE_TRANSFER     (1 << 1)
+#define RIO_CAP_DBL_SEND               (1 << 2)
+#define RIO_CAP_DBL_RECV               (1 << 3)
+#define RIO_CAP_PW_SEND                        (1 << 4)
+#define RIO_CAP_PW_RECV                        (1 << 5)
+#define RIO_CAP_MAP_OUTB               (1 << 6)
+#define RIO_CAP_MAP_INB                        (1 << 7)
+
+struct rio_mport_properties {
+       __u16 hdid;
+       __u8  id;                       /* Physical port ID */
+       __u8  index;
+       __u32 flags;
+       __u32 sys_size;         /* Default addressing size */
+       __u8  port_ok;
+       __u8  link_speed;
+       __u8  link_width;
+       __u8  pad0;
+       __u32 dma_max_sge;
+       __u32 dma_max_size;
+       __u32 dma_align;
+       __u32 transfer_mode;            /* Default transfer mode */
+       __u32 cap_sys_size;             /* Capable system sizes */
+       __u32 cap_addr_size;            /* Capable addressing sizes */
+       __u32 cap_transfer_mode;        /* Capable transfer modes */
+       __u32 cap_mport;                /* Mport capabilities */
+};
+
+/*
+ * Definitions for RapidIO events;
+ * - incoming port-writes
+ * - incoming doorbells
+ */
+#define RIO_DOORBELL   (1 << 0)
+#define RIO_PORTWRITE  (1 << 1)
+
+struct rio_doorbell {
+       __u16 rioid;
+       __u16 payload;
+};
+
+struct rio_doorbell_filter {
+       __u16 rioid;    /* Use RIO_INVALID_DESTID to match all ids */
+       __u16 low;
+       __u16 high;
+       __u16 pad0;
+};
+
+
+struct rio_portwrite {
+       __u32 payload[16];
+};
+
+struct rio_pw_filter {
+       __u32 mask;
+       __u32 low;
+       __u32 high;
+       __u32 pad0;
+};
+
+/* Setting the RapidIO base address for inbound requests to the value defined
+ * below indicates that no specific RIO-to-local address translation is
+ * requested and that the driver should use direct (one-to-one) address mapping.
+ */
+#define RIO_MAP_ANY_ADDR       (__u64)(~((__u64) 0))
+
+struct rio_mmap {
+       __u16 rioid;
+       __u16 pad0[3];
+       __u64 rio_addr;
+       __u64 length;
+       __u64 handle;
+       __u64 address;
+};
+
+struct rio_dma_mem {
+       __u64 length;           /* length of DMA memory */
+       __u64 dma_handle;       /* handle associated with this memory */
+       __u64 address;
+};
+
+struct rio_event {
+       __u32 header;   /* event type RIO_DOORBELL or RIO_PORTWRITE */
+       union {
+               struct rio_doorbell doorbell;   /* header for RIO_DOORBELL */
+               struct rio_portwrite portwrite; /* header for RIO_PORTWRITE */
+       } u;
+       __u32 pad0;
+};
+
+enum rio_transfer_sync {
+       RIO_TRANSFER_SYNC,      /* synchronous transfer */
+       RIO_TRANSFER_ASYNC,     /* asynchronous transfer */
+       RIO_TRANSFER_FAF,       /* fire-and-forget transfer */
+};
+
+enum rio_transfer_dir {
+       RIO_TRANSFER_DIR_READ,  /* Read operation */
+       RIO_TRANSFER_DIR_WRITE, /* Write operation */
+};
+
+/*
+ * RapidIO data exchange transactions are lists of individual transfers. Each
+ * transfer exchanges data between two RapidIO devices by remote direct memory
+ * access and has its own completion code.
+ *
+ * The RapidIO specification defines four types of data exchange requests:
+ * NREAD, NWRITE, SWRITE and NWRITE_R. The RapidIO DMA channel interface allows
+ * the caller to specify the required type of write operation, or a combination
+ * of them when only the last data packet requires a response.
+ *
+ * NREAD:    read up to 256 bytes from remote device memory into local memory
+ * NWRITE:   write up to 256 bytes from local memory to remote device memory
+ *           without confirmation
+ * SWRITE:   as NWRITE, but all addresses and payloads must be 64-bit aligned
+ * NWRITE_R: as NWRITE, but expect acknowledgment from remote device.
+ *
+ * The default exchange is chosen from NREAD and any of the WRITE modes as the
+ * driver sees fit. For write requests the user can explicitly choose between
+ * any of the write modes for each transaction.
+ */
+enum rio_exchange {
+       RIO_EXCHANGE_DEFAULT,   /* Default method */
+       RIO_EXCHANGE_NWRITE,    /* All packets using NWRITE */
+       RIO_EXCHANGE_SWRITE,    /* All packets using SWRITE */
+       RIO_EXCHANGE_NWRITE_R,  /* Last packet NWRITE_R, others NWRITE */
+       RIO_EXCHANGE_SWRITE_R,  /* Last packet NWRITE_R, others SWRITE */
+       RIO_EXCHANGE_NWRITE_R_ALL, /* All packets using NWRITE_R */
+};
+
+struct rio_transfer_io {
+       __u64 rio_addr; /* Address in target's RIO mem space */
+       __u64 loc_addr;
+       __u64 handle;
+       __u64 offset;   /* Offset in buffer */
+       __u64 length;   /* Length in bytes */
+       __u16 rioid;    /* Target destID */
+       __u16 method;   /* Data exchange method, one of rio_exchange enum */
+       __u32 completion_code;  /* Completion code for this transfer */
+};
+
+struct rio_transaction {
+       __u64 block;    /* Pointer to array of <count> transfers */
+       __u32 count;    /* Number of transfers */
+       __u32 transfer_mode;    /* Data transfer mode */
+       __u16 sync;     /* Synch method, one of rio_transfer_sync enum */
+       __u16 dir;      /* Transfer direction, one of rio_transfer_dir enum */
+       __u32 pad0;
+};
+
+struct rio_async_tx_wait {
+       __u32 token;    /* DMA transaction ID token */
+       __u32 timeout;  /* Wait timeout in msec, if 0 use default TO */
+};
+
+#define RIO_MAX_DEVNAME_SZ     20
+
+struct rio_rdev_info {
+       __u16 destid;
+       __u8 hopcount;
+       __u8 pad0;
+       __u32 comptag;
+       char name[RIO_MAX_DEVNAME_SZ + 1];
+};
+
+/* Driver IOCTL codes */
+#define RIO_MPORT_DRV_MAGIC           'm'
+
+#define RIO_MPORT_MAINT_HDID_SET       \
+       _IOW(RIO_MPORT_DRV_MAGIC, 1, __u16)
+#define RIO_MPORT_MAINT_COMPTAG_SET    \
+       _IOW(RIO_MPORT_DRV_MAGIC, 2, __u32)
+#define RIO_MPORT_MAINT_PORT_IDX_GET   \
+       _IOR(RIO_MPORT_DRV_MAGIC, 3, __u32)
+#define RIO_MPORT_GET_PROPERTIES \
+       _IOR(RIO_MPORT_DRV_MAGIC, 4, struct rio_mport_properties)
+#define RIO_MPORT_MAINT_READ_LOCAL \
+       _IOR(RIO_MPORT_DRV_MAGIC, 5, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_LOCAL \
+       _IOW(RIO_MPORT_DRV_MAGIC, 6, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_READ_REMOTE \
+       _IOR(RIO_MPORT_DRV_MAGIC, 7, struct rio_mport_maint_io)
+#define RIO_MPORT_MAINT_WRITE_REMOTE \
+       _IOW(RIO_MPORT_DRV_MAGIC, 8, struct rio_mport_maint_io)
+#define RIO_ENABLE_DOORBELL_RANGE      \
+       _IOW(RIO_MPORT_DRV_MAGIC, 9, struct rio_doorbell_filter)
+#define RIO_DISABLE_DOORBELL_RANGE     \
+       _IOW(RIO_MPORT_DRV_MAGIC, 10, struct rio_doorbell_filter)
+#define RIO_ENABLE_PORTWRITE_RANGE     \
+       _IOW(RIO_MPORT_DRV_MAGIC, 11, struct rio_pw_filter)
+#define RIO_DISABLE_PORTWRITE_RANGE    \
+       _IOW(RIO_MPORT_DRV_MAGIC, 12, struct rio_pw_filter)
+#define RIO_SET_EVENT_MASK             \
+       _IOW(RIO_MPORT_DRV_MAGIC, 13, __u32)
+#define RIO_GET_EVENT_MASK             \
+       _IOR(RIO_MPORT_DRV_MAGIC, 14, __u32)
+#define RIO_MAP_OUTBOUND \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 15, struct rio_mmap)
+#define RIO_UNMAP_OUTBOUND \
+       _IOW(RIO_MPORT_DRV_MAGIC, 16, struct rio_mmap)
+#define RIO_MAP_INBOUND \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 17, struct rio_mmap)
+#define RIO_UNMAP_INBOUND \
+       _IOW(RIO_MPORT_DRV_MAGIC, 18, __u64)
+#define RIO_ALLOC_DMA \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 19, struct rio_dma_mem)
+#define RIO_FREE_DMA \
+       _IOW(RIO_MPORT_DRV_MAGIC, 20, __u64)
+#define RIO_TRANSFER \
+       _IOWR(RIO_MPORT_DRV_MAGIC, 21, struct rio_transaction)
+#define RIO_WAIT_FOR_ASYNC \
+       _IOW(RIO_MPORT_DRV_MAGIC, 22, struct rio_async_tx_wait)
+#define RIO_DEV_ADD \
+       _IOW(RIO_MPORT_DRV_MAGIC, 23, struct rio_rdev_info)
+#define RIO_DEV_DEL \
+       _IOW(RIO_MPORT_DRV_MAGIC, 24, struct rio_rdev_info)
+
+#endif /* _RIO_MPORT_CDEV_H_ */
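
Unlike the deleted include/linux copy earlier in this merge, the re-added UAPI header uses only fixed-width __uXX types, adds explicit padding, and carries user pointers in __u64 fields, so the ioctl layout is identical for 32-bit and 64-bit callers. A hedged user-space sketch of that convention (the file descriptor is assumed to come from opening an mport character device; the device path is not defined by this header):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/rio_mport_cdev.h>

    /* Sketch: pointers travel inside __u64 fields, never as raw pointers. */
    static int read_remote_regs(int fd, uint16_t destid, uint8_t hops,
                                uint32_t offset, void *buf, uint32_t len)
    {
            struct rio_mport_maint_io io;

            memset(&io, 0, sizeof(io));
            io.rioid = destid;
            io.hopcount = hops;
            io.offset = offset;
            io.length = len;
            io.buffer = (uintptr_t)buf;     /* user pointer squeezed into __u64 */

            return ioctl(fd, RIO_MPORT_MAINT_READ_REMOTE, &io);
    }
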
index e1bd50c29ded87e7011c84a0b9e222ba9b437ff2..cd0804b6bfa2ed0980fe1cec8e01201833cbdaef 100644 (file)
@@ -7,4 +7,9 @@
 #define SS_ONSTACK     1
 #define SS_DISABLE     2
 
+/* bit-flags */
+#define SS_AUTODISARM  (1U << 31)      /* disable sas during sighandling */
+/* mask for all SS_xxx flags */
+#define SS_FLAG_BITS   SS_AUTODISARM
+
 #endif /* _UAPI_LINUX_SIGNAL_H */
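
SS_AUTODISARM is or'ed into ss_flags by user space; when a handler is entered on the alternate stack, the kernel saves and clears the sigaltstack settings (see the sas_ss_reset() and __save_altstack() changes earlier in this merge) and restores them on sigreturn, so the alternate stack can safely be switched away from, e.g. by userspace coroutines. A hedged user-space sketch; the running kernel must include this patch or sigaltstack() rejects the flag, and older libc headers may not define it yet:

    #include <signal.h>
    #include <string.h>

    #ifndef SS_AUTODISARM
    #define SS_AUTODISARM (1U << 31)        /* value from this patch */
    #endif

    static char altstack[64 * 1024];

    static void handler(int sig)
    {
            /* Here the kernel has auto-disarmed the alternate stack; a
             * sigaltstack() query would report SS_DISABLE until return. */
    }

    int main(void)
    {
            stack_t ss = {
                    .ss_sp    = altstack,
                    .ss_size  = sizeof(altstack),
                    .ss_flags = SS_AUTODISARM,
            };
            struct sigaction sa;

            sigaltstack(&ss, NULL);
            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = handler;
            sa.sa_flags = SA_ONSTACK;
            sigaction(SIGUSR1, &sa, NULL);
            raise(SIGUSR1);
            return 0;
    }
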
index 3f10e5317b46b3858479b2af949d5a719bc72405..8f3a8f606fd95b723d9ed26e179c8a741250717f 100644 (file)
@@ -45,9 +45,7 @@
 
 static inline __attribute_const__ __u16 __fswab16(__u16 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP16__
-       return __builtin_bswap16(val);
-#elif defined (__arch_swab16)
+#if defined (__arch_swab16)
        return __arch_swab16(val);
 #else
        return ___constant_swab16(val);
@@ -56,9 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
 
 static inline __attribute_const__ __u32 __fswab32(__u32 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP32__
-       return __builtin_bswap32(val);
-#elif defined(__arch_swab32)
+#if defined(__arch_swab32)
        return __arch_swab32(val);
 #else
        return ___constant_swab32(val);
@@ -67,9 +63,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
 
 static inline __attribute_const__ __u64 __fswab64(__u64 val)
 {
-#ifdef __HAVE_BUILTIN_BSWAP64__
-       return __builtin_bswap64(val);
-#elif defined (__arch_swab64)
+#if defined (__arch_swab64)
        return __arch_swab64(val);
 #elif defined(__SWAB_64_THRU_32__)
        __u32 h = val >> 32;
@@ -102,28 +96,40 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
  * __swab16 - return a byteswapped 16-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP16__
+#define __swab16(x) (__u16)__builtin_bswap16((__u16)(x))
+#else
 #define __swab16(x)                            \
        (__builtin_constant_p((__u16)(x)) ?     \
        ___constant_swab16(x) :                 \
        __fswab16(x))
+#endif
 
 /**
  * __swab32 - return a byteswapped 32-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP32__
+#define __swab32(x) (__u32)__builtin_bswap32((__u32)(x))
+#else
 #define __swab32(x)                            \
        (__builtin_constant_p((__u32)(x)) ?     \
        ___constant_swab32(x) :                 \
        __fswab32(x))
+#endif
 
 /**
  * __swab64 - return a byteswapped 64-bit value
  * @x: value to byteswap
  */
+#ifdef __HAVE_BUILTIN_BSWAP64__
+#define __swab64(x) (__u64)__builtin_bswap64((__u64)(x))
+#else
 #define __swab64(x)                            \
        (__builtin_constant_p((__u64)(x)) ?     \
        ___constant_swab64(x) :                 \
        __fswab64(x))
+#endif
 
 /**
  * __swahw32 - return a word-swapped 32-bit value
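
Moving the __builtin_bswap*() calls out of the out-of-line __fswabXX() helpers and into the __swabXX() macros lets the compiler see the builtin directly at each call site, so constant operands can still be folded at compile time while runtime operands become a single byte-swap instruction where the architecture has one. A hedged stand-alone illustration of the call-site behaviour on GCC/clang (my_swab32() mirrors the patched macro, it is not a kernel interface):

    #include <stdint.h>
    #include <stdio.h>

    /* What the patched __swab32() expands to when the builtin exists. */
    #define my_swab32(x) ((uint32_t)__builtin_bswap32((uint32_t)(x)))

    int main(void)
    {
            uint32_t v = 0x12345678;

            /* Constant operand: typically folded to 0x78563412 at compile time. */
            printf("%#x\n", (unsigned)my_swab32(0x12345678));
            /* Runtime operand: a single bswap/rev instruction on x86/ARM. */
            printf("%#x\n", (unsigned)my_swab32(v));
            return 0;
    }
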
index 242cf0c6e33d37f229a224839ca6f679bb142674..e3969bd939e41d7fb1ff47f61344fad5f1cae482 100644 (file)
@@ -10,3 +10,4 @@ header-y += tc_skbedit.h
 header-y += tc_vlan.h
 header-y += tc_bpf.h
 header-y += tc_connmark.h
+header-y += tc_ife.h
index 96294ac937552178531266cb16ce04185e47c99a..9dc46cb8a0fd79be7f4bdae7ae8f5f4dbdcdb7f4 100644 (file)
@@ -15,9 +15,9 @@
  */
 
 #define xen_pfn_to_page(xen_pfn)       \
-       ((pfn_to_page(((unsigned long)(xen_pfn) << XEN_PAGE_SHIFT) >> PAGE_SHIFT)))
+       (pfn_to_page((unsigned long)(xen_pfn) >> (PAGE_SHIFT - XEN_PAGE_SHIFT)))
 #define page_to_xen_pfn(page)          \
-       (((page_to_pfn(page)) << PAGE_SHIFT) >> XEN_PAGE_SHIFT)
+       ((page_to_pfn(page)) << (PAGE_SHIFT - XEN_PAGE_SHIFT))
 
 #define XEN_PFN_PER_PAGE       (PAGE_SIZE / XEN_PAGE_SIZE)
 
index f2ece3c174a5b540b40492833048d6c8bd9f3f1c..8f94ca1860cfdcdd9509e40d40c3ad7cbf572e2d 100644 (file)
@@ -31,10 +31,10 @@ static void *bpf_any_get(void *raw, enum bpf_type type)
 {
        switch (type) {
        case BPF_TYPE_PROG:
-               atomic_inc(&((struct bpf_prog *)raw)->aux->refcnt);
+               raw = bpf_prog_inc(raw);
                break;
        case BPF_TYPE_MAP:
-               bpf_map_inc(raw, true);
+               raw = bpf_map_inc(raw, true);
                break;
        default:
                WARN_ON_ONCE(1);
@@ -297,7 +297,8 @@ static void *bpf_obj_do_get(const struct filename *pathname,
                goto out;
 
        raw = bpf_any_get(inode->i_private, *type);
-       touch_atime(&path);
+       if (!IS_ERR(raw))
+               touch_atime(&path);
 
        path_put(&path);
        return raw;
index adc5e4bd74f8bc3310cfe4b75257647886b2e11f..cf5e9f7ad13ad13ebb5b6bc5a775beca834c7c86 100644 (file)
@@ -218,11 +218,18 @@ struct bpf_map *__bpf_map_get(struct fd f)
        return f.file->private_data;
 }
 
-void bpf_map_inc(struct bpf_map *map, bool uref)
+/* prog's and map's refcnt limit */
+#define BPF_MAX_REFCNT 32768
+
+struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
 {
-       atomic_inc(&map->refcnt);
+       if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&map->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
        if (uref)
                atomic_inc(&map->usercnt);
+       return map;
 }
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd)
@@ -234,7 +241,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
        if (IS_ERR(map))
                return map;
 
-       bpf_map_inc(map, true);
+       map = bpf_map_inc(map, true);
        fdput(f);
 
        return map;
@@ -658,6 +665,15 @@ static struct bpf_prog *__bpf_prog_get(struct fd f)
        return f.file->private_data;
 }
 
+struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
+{
+       if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
+               atomic_dec(&prog->aux->refcnt);
+               return ERR_PTR(-EBUSY);
+       }
+       return prog;
+}
+
 /* called by sockets/tracing/seccomp before attaching program to an event
  * pairs with bpf_prog_put()
  */
@@ -670,7 +686,7 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
        if (IS_ERR(prog))
                return prog;
 
-       atomic_inc(&prog->aux->refcnt);
+       prog = bpf_prog_inc(prog);
        fdput(f);
 
        return prog;
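
Because bpf_map_inc() and bpf_prog_inc() now refuse to go past BPF_MAX_REFCNT and return ERR_PTR(-EBUSY) instead, every caller has to check the returned pointer rather than assume the reference was taken, which is what the updated bpf_any_get() and, below, replace_map_fd_with_map_ptr() do. A hedged sketch of the resulting caller pattern:

    #include <linux/bpf.h>
    #include <linux/err.h>

    /* Sketch: taking an extra reference on a map with the new API. */
    static struct bpf_map *grab_map(struct bpf_map *map)
    {
            map = bpf_map_inc(map, false);  /* may now return ERR_PTR(-EBUSY) */
            if (IS_ERR(map))
                    return map;             /* propagate; no reference was taken */

            /* ... use the map, then drop the reference with bpf_map_put() ... */
            return map;
    }
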
index db2574e7b8b008395bb8fee53baf513fdc56c392..c5c17a62f509f67385cfb9a587e702646cc19b80 100644 (file)
@@ -239,16 +239,6 @@ static const char * const reg_type_str[] = {
        [CONST_IMM]             = "imm",
 };
 
-static const struct {
-       int map_type;
-       int func_id;
-} func_limit[] = {
-       {BPF_MAP_TYPE_PROG_ARRAY, BPF_FUNC_tail_call},
-       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_read},
-       {BPF_MAP_TYPE_PERF_EVENT_ARRAY, BPF_FUNC_perf_event_output},
-       {BPF_MAP_TYPE_STACK_TRACE, BPF_FUNC_get_stackid},
-};
-
 static void print_verifier_state(struct verifier_env *env)
 {
        enum bpf_reg_type t;
@@ -921,27 +911,52 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
 
 static int check_map_func_compatibility(struct bpf_map *map, int func_id)
 {
-       bool bool_map, bool_func;
-       int i;
-
        if (!map)
                return 0;
 
-       for (i = 0; i < ARRAY_SIZE(func_limit); i++) {
-               bool_map = (map->map_type == func_limit[i].map_type);
-               bool_func = (func_id == func_limit[i].func_id);
-               /* only when map & func pair match it can continue.
-                * don't allow any other map type to be passed into
-                * the special func;
-                */
-               if (bool_func && bool_map != bool_func) {
-                       verbose("cannot pass map_type %d into func %d\n",
-                               map->map_type, func_id);
-                       return -EINVAL;
-               }
+       /* We need a two way check, first is from map perspective ... */
+       switch (map->map_type) {
+       case BPF_MAP_TYPE_PROG_ARRAY:
+               if (func_id != BPF_FUNC_tail_call)
+                       goto error;
+               break;
+       case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
+               if (func_id != BPF_FUNC_perf_event_read &&
+                   func_id != BPF_FUNC_perf_event_output)
+                       goto error;
+               break;
+       case BPF_MAP_TYPE_STACK_TRACE:
+               if (func_id != BPF_FUNC_get_stackid)
+                       goto error;
+               break;
+       default:
+               break;
+       }
+
+       /* ... and second from the function itself. */
+       switch (func_id) {
+       case BPF_FUNC_tail_call:
+               if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
+                       goto error;
+               break;
+       case BPF_FUNC_perf_event_read:
+       case BPF_FUNC_perf_event_output:
+               if (map->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY)
+                       goto error;
+               break;
+       case BPF_FUNC_get_stackid:
+               if (map->map_type != BPF_MAP_TYPE_STACK_TRACE)
+                       goto error;
+               break;
+       default:
+               break;
        }
 
        return 0;
+error:
+       verbose("cannot pass map_type %d into func %d\n",
+               map->map_type, func_id);
+       return -EINVAL;
 }
 
 static int check_call(struct verifier_env *env, int func_id)
@@ -2049,15 +2064,18 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
                                return -E2BIG;
                        }
 
-                       /* remember this map */
-                       env->used_maps[env->used_map_cnt++] = map;
-
                        /* hold the map. If the program is rejected by verifier,
                         * the map will be released by release_maps() or it
                         * will be used by the valid program until it's unloaded
                         * and all maps are released in free_bpf_prog_info()
                         */
-                       bpf_map_inc(map, false);
+                       map = bpf_map_inc(map, false);
+                       if (IS_ERR(map)) {
+                               fdput(f);
+                               return PTR_ERR(map);
+                       }
+                       env->used_maps[env->used_map_cnt++] = map;
+
                        fdput(f);
 next_insn:
                        insn++;
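
The rewritten compatibility check is deliberately symmetric: the map-type switch restricts which helpers may receive a special map, and the helper switch restricts which map types each special helper accepts, so a bad pairing is caught no matter which side is "special". A self-contained user-space model of that two-way check with simplified, partly hypothetical names (it is not the verifier itself, and it omits some helpers for brevity):

    #include <stdbool.h>
    #include <stdio.h>

    enum map_type { MAP_PROG_ARRAY, MAP_PERF_EVENT_ARRAY, MAP_STACK_TRACE, MAP_HASH };
    enum func_id  { FUNC_tail_call, FUNC_perf_event_output, FUNC_get_stackid, FUNC_map_lookup };

    /* Both directions must agree before a map/helper pair is accepted. */
    static bool compatible(enum map_type map, enum func_id func)
    {
            switch (map) {                  /* map side: allowed helpers */
            case MAP_PROG_ARRAY:        if (func != FUNC_tail_call) return false; break;
            case MAP_PERF_EVENT_ARRAY:  if (func != FUNC_perf_event_output) return false; break;
            case MAP_STACK_TRACE:       if (func != FUNC_get_stackid) return false; break;
            default:                    break;
            }
            switch (func) {                 /* helper side: allowed maps */
            case FUNC_tail_call:          return map == MAP_PROG_ARRAY;
            case FUNC_perf_event_output:  return map == MAP_PERF_EVENT_ARRAY;
            case FUNC_get_stackid:        return map == MAP_STACK_TRACE;
            default:                      return true;
            }
    }

    int main(void)
    {
            printf("%d\n", compatible(MAP_HASH, FUNC_tail_call));        /* 0: rejected */
            printf("%d\n", compatible(MAP_PROG_ARRAY, FUNC_tail_call));  /* 1: accepted */
            return 0;
    }
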
index 909a7d31ffd3d3083c2253aefd4253715bcb2278..86cb5c6e89320f28e17691c6d69e58c9dfde81fb 100644 (file)
@@ -1215,6 +1215,41 @@ static void cgroup_destroy_root(struct cgroup_root *root)
        cgroup_free_root(root);
 }
 
+/*
+ * look up cgroup associated with current task's cgroup namespace on the
+ * specified hierarchy
+ */
+static struct cgroup *
+current_cgns_cgroup_from_root(struct cgroup_root *root)
+{
+       struct cgroup *res = NULL;
+       struct css_set *cset;
+
+       lockdep_assert_held(&css_set_lock);
+
+       rcu_read_lock();
+
+       cset = current->nsproxy->cgroup_ns->root_cset;
+       if (cset == &init_css_set) {
+               res = &root->cgrp;
+       } else {
+               struct cgrp_cset_link *link;
+
+               list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
+                       struct cgroup *c = link->cgrp;
+
+                       if (c->root == root) {
+                               res = c;
+                               break;
+                       }
+               }
+       }
+       rcu_read_unlock();
+
+       BUG_ON(!res);
+       return res;
+}
+
 /* look up cgroup associated with given css_set on the specified hierarchy */
 static struct cgroup *cset_cgroup_from_root(struct css_set *cset,
                                            struct cgroup_root *root)
@@ -1593,6 +1628,33 @@ static int rebind_subsystems(struct cgroup_root *dst_root, u16 ss_mask)
        return 0;
 }
 
+static int cgroup_show_path(struct seq_file *sf, struct kernfs_node *kf_node,
+                           struct kernfs_root *kf_root)
+{
+       int len = 0;
+       char *buf = NULL;
+       struct cgroup_root *kf_cgroot = cgroup_root_from_kf(kf_root);
+       struct cgroup *ns_cgroup;
+
+       buf = kmalloc(PATH_MAX, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       spin_lock_bh(&css_set_lock);
+       ns_cgroup = current_cgns_cgroup_from_root(kf_cgroot);
+       len = kernfs_path_from_node(kf_node, ns_cgroup->kn, buf, PATH_MAX);
+       spin_unlock_bh(&css_set_lock);
+
+       if (len >= PATH_MAX)
+               len = -ERANGE;
+       else if (len > 0) {
+               seq_escape(sf, buf, " \t\n\\");
+               len = 0;
+       }
+       kfree(buf);
+       return len;
+}
+
 static int cgroup_show_options(struct seq_file *seq,
                               struct kernfs_root *kf_root)
 {
@@ -5433,6 +5495,7 @@ static struct kernfs_syscall_ops cgroup_kf_syscall_ops = {
        .mkdir                  = cgroup_mkdir,
        .rmdir                  = cgroup_rmdir,
        .rename                 = cgroup_rename,
+       .show_path              = cgroup_show_path,
 };
 
 static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
index 4e2ebf6f2f1f9fcec32b5d30f69267df12ba4654..c0ded24166158f26b0c4f3ce1c47dd065e28700c 100644 (file)
@@ -351,7 +351,7 @@ static struct srcu_struct pmus_srcu;
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
  */
-int sysctl_perf_event_paranoid __read_mostly = 1;
+int sysctl_perf_event_paranoid __read_mostly = 2;
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
index c61f0cbd308b5b4456e69c1539f9b334a34c0632..7611d0f66cf85adfab20f6374517a0efc645ebe4 100644 (file)
@@ -347,6 +347,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
                         bool truncated)
 {
        struct ring_buffer *rb = handle->rb;
+       bool wakeup = truncated;
        unsigned long aux_head;
        u64 flags = 0;
 
@@ -375,9 +376,16 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
        aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
 
        if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
-               perf_output_wakeup(handle);
+               wakeup = true;
                local_add(rb->aux_watermark, &rb->aux_wakeup);
        }
+
+       if (wakeup) {
+               if (truncated)
+                       handle->event->pending_disable = 1;
+               perf_output_wakeup(handle);
+       }
+
        handle->event = NULL;
 
        local_set(&rb->aux_nest, 0);
index d277e83ed3e06d702875e4823a827c8a4c48b729..3e8451527cbe2ac87eed8bc3c9d64aeade9d10ef 100644 (file)
@@ -1494,7 +1494,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         * sigaltstack should be cleared when sharing the same VM
         */
        if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
-               p->sas_ss_sp = p->sas_ss_size = 0;
+               sas_ss_reset(p);
 
        /*
         * Syscall tracing and stepping should be turned off in the
index 032b2c015beb6c01710f3a4c0c1eab6666d65e65..18dfc485225c3954ccf29a8743bc636f10db7d41 100644 (file)
@@ -5,6 +5,7 @@ KCOV_INSTRUMENT := n
 obj-y += update.o sync.o
 obj-$(CONFIG_SRCU) += srcu.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
+obj-$(CONFIG_RCU_PERF_TEST) += rcuperf.o
 obj-$(CONFIG_TREE_RCU) += tree.o
 obj-$(CONFIG_PREEMPT_RCU) += tree.o
 obj-$(CONFIG_TREE_RCU_TRACE) += tree_trace.o
diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
new file mode 100644 (file)
index 0000000..3cee0d8
--- /dev/null
@@ -0,0 +1,655 @@
+/*
+ * Read-Copy Update module-based performance-test facility
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2015
+ *
+ * Authors: Paul E. McKenney <paulmck@us.ibm.com>
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kthread.h>
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+#include <linux/freezer.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/stat.h>
+#include <linux/srcu.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/torture.h>
+#include <linux/vmalloc.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");
+
+#define PERF_FLAG "-perf:"
+#define PERFOUT_STRING(s) \
+       pr_alert("%s" PERF_FLAG s "\n", perf_type)
+#define VERBOSE_PERFOUT_STRING(s) \
+       do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
+#define VERBOSE_PERFOUT_ERRSTRING(s) \
+       do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
+
+torture_param(bool, gp_exp, true, "Use expedited GP wait primitives");
+torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
+torture_param(int, nreaders, -1, "Number of RCU reader threads");
+torture_param(int, nwriters, -1, "Number of RCU updater threads");
+torture_param(bool, shutdown, false, "Shutdown at end of performance tests.");
+torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+
+static char *perf_type = "rcu";
+module_param(perf_type, charp, 0444);
+MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");
+
+static int nrealreaders;
+static int nrealwriters;
+static struct task_struct **writer_tasks;
+static struct task_struct **reader_tasks;
+static struct task_struct *shutdown_task;
+
+static u64 **writer_durations;
+static int *writer_n_durations;
+static atomic_t n_rcu_perf_reader_started;
+static atomic_t n_rcu_perf_writer_started;
+static atomic_t n_rcu_perf_writer_finished;
+static wait_queue_head_t shutdown_wq;
+static u64 t_rcu_perf_writer_started;
+static u64 t_rcu_perf_writer_finished;
+static unsigned long b_rcu_perf_writer_started;
+static unsigned long b_rcu_perf_writer_finished;
+
+static int rcu_perf_writer_state;
+#define RTWS_INIT              0
+#define RTWS_EXP_SYNC          1
+#define RTWS_SYNC              2
+#define RTWS_IDLE              3
+#define RTWS_STOPPING          4
+
+#define MAX_MEAS 10000
+#define MIN_MEAS 100
+
+#if defined(MODULE) || defined(CONFIG_RCU_PERF_TEST_RUNNABLE)
+#define RCUPERF_RUNNABLE_INIT 1
+#else
+#define RCUPERF_RUNNABLE_INIT 0
+#endif
+static int perf_runnable = RCUPERF_RUNNABLE_INIT;
+module_param(perf_runnable, int, 0444);
+MODULE_PARM_DESC(perf_runnable, "Start rcuperf at boot");
+
+/*
+ * Operations vector for selecting different types of tests.
+ */
+
+struct rcu_perf_ops {
+       int ptype;
+       void (*init)(void);
+       void (*cleanup)(void);
+       int (*readlock)(void);
+       void (*readunlock)(int idx);
+       unsigned long (*started)(void);
+       unsigned long (*completed)(void);
+       unsigned long (*exp_completed)(void);
+       void (*sync)(void);
+       void (*exp_sync)(void);
+       const char *name;
+};
+
+static struct rcu_perf_ops *cur_ops;
+
+/*
+ * Definitions for rcu perf testing.
+ */
+
+static int rcu_perf_read_lock(void) __acquires(RCU)
+{
+       rcu_read_lock();
+       return 0;
+}
+
+static void rcu_perf_read_unlock(int idx) __releases(RCU)
+{
+       rcu_read_unlock();
+}
+
+static unsigned long __maybe_unused rcu_no_completed(void)
+{
+       return 0;
+}
+
+static void rcu_sync_perf_init(void)
+{
+}
+
+static struct rcu_perf_ops rcu_ops = {
+       .ptype          = RCU_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = rcu_perf_read_lock,
+       .readunlock     = rcu_perf_read_unlock,
+       .started        = rcu_batches_started,
+       .completed      = rcu_batches_completed,
+       .exp_completed  = rcu_exp_batches_completed,
+       .sync           = synchronize_rcu,
+       .exp_sync       = synchronize_rcu_expedited,
+       .name           = "rcu"
+};
+
+/*
+ * Definitions for rcu_bh perf testing.
+ */
+
+static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
+{
+       rcu_read_lock_bh();
+       return 0;
+}
+
+static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
+{
+       rcu_read_unlock_bh();
+}
+
+static struct rcu_perf_ops rcu_bh_ops = {
+       .ptype          = RCU_BH_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = rcu_bh_perf_read_lock,
+       .readunlock     = rcu_bh_perf_read_unlock,
+       .started        = rcu_batches_started_bh,
+       .completed      = rcu_batches_completed_bh,
+       .exp_completed  = rcu_exp_batches_completed_sched,
+       .sync           = synchronize_rcu_bh,
+       .exp_sync       = synchronize_rcu_bh_expedited,
+       .name           = "rcu_bh"
+};
+
+/*
+ * Definitions for srcu perf testing.
+ */
+
+DEFINE_STATIC_SRCU(srcu_ctl_perf);
+static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;
+
+static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
+{
+       return srcu_read_lock(srcu_ctlp);
+}
+
+static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
+{
+       srcu_read_unlock(srcu_ctlp, idx);
+}
+
+static unsigned long srcu_perf_completed(void)
+{
+       return srcu_batches_completed(srcu_ctlp);
+}
+
+static void srcu_perf_synchronize(void)
+{
+       synchronize_srcu(srcu_ctlp);
+}
+
+static void srcu_perf_synchronize_expedited(void)
+{
+       synchronize_srcu_expedited(srcu_ctlp);
+}
+
+static struct rcu_perf_ops srcu_ops = {
+       .ptype          = SRCU_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = srcu_perf_read_lock,
+       .readunlock     = srcu_perf_read_unlock,
+       .started        = NULL,
+       .completed      = srcu_perf_completed,
+       .exp_completed  = srcu_perf_completed,
+       .sync           = srcu_perf_synchronize,
+       .exp_sync       = srcu_perf_synchronize_expedited,
+       .name           = "srcu"
+};
+
+/*
+ * Definitions for sched perf testing.
+ */
+
+static int sched_perf_read_lock(void)
+{
+       preempt_disable();
+       return 0;
+}
+
+static void sched_perf_read_unlock(int idx)
+{
+       preempt_enable();
+}
+
+static struct rcu_perf_ops sched_ops = {
+       .ptype          = RCU_SCHED_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = sched_perf_read_lock,
+       .readunlock     = sched_perf_read_unlock,
+       .started        = rcu_batches_started_sched,
+       .completed      = rcu_batches_completed_sched,
+       .exp_completed  = rcu_exp_batches_completed_sched,
+       .sync           = synchronize_sched,
+       .exp_sync       = synchronize_sched_expedited,
+       .name           = "sched"
+};
+
+#ifdef CONFIG_TASKS_RCU
+
+/*
+ * Definitions for RCU-tasks perf testing.
+ */
+
+static int tasks_perf_read_lock(void)
+{
+       return 0;
+}
+
+static void tasks_perf_read_unlock(int idx)
+{
+}
+
+static struct rcu_perf_ops tasks_ops = {
+       .ptype          = RCU_TASKS_FLAVOR,
+       .init           = rcu_sync_perf_init,
+       .readlock       = tasks_perf_read_lock,
+       .readunlock     = tasks_perf_read_unlock,
+       .started        = rcu_no_completed,
+       .completed      = rcu_no_completed,
+       .sync           = synchronize_rcu_tasks,
+       .exp_sync       = synchronize_rcu_tasks,
+       .name           = "tasks"
+};
+
+#define RCUPERF_TASKS_OPS &tasks_ops,
+
+static bool __maybe_unused torturing_tasks(void)
+{
+       return cur_ops == &tasks_ops;
+}
+
+#else /* #ifdef CONFIG_TASKS_RCU */
+
+#define RCUPERF_TASKS_OPS
+
+static bool __maybe_unused torturing_tasks(void)
+{
+       return false;
+}
+
+#endif /* #else #ifdef CONFIG_TASKS_RCU */
+
+/*
+ * If performance tests complete, wait for shutdown to commence.
+ */
+static void rcu_perf_wait_shutdown(void)
+{
+       cond_resched_rcu_qs();
+       if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
+               return;
+       while (!torture_must_stop())
+               schedule_timeout_uninterruptible(1);
+}
+
+/*
+ * RCU perf reader kthread.  Repeatedly enters and exits an empty RCU
+ * read-side critical section, minimizing update-side interference.
+ */
+static int
+rcu_perf_reader(void *arg)
+{
+       unsigned long flags;
+       int idx;
+       long me = (long)arg;
+
+       VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
+       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+       set_user_nice(current, MAX_NICE);
+       atomic_inc(&n_rcu_perf_reader_started);
+
+       do {
+               local_irq_save(flags);
+               idx = cur_ops->readlock();
+               cur_ops->readunlock(idx);
+               local_irq_restore(flags);
+               rcu_perf_wait_shutdown();
+       } while (!torture_must_stop());
+       torture_kthread_stopping("rcu_perf_reader");
+       return 0;
+}
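
The reader loop above deliberately measures nothing; it only generates read-side load so that the writers see realistic interference. If one wanted a rough per-pass read-side cost for experimentation, a hypothetical variant (not part of this module) could time the lock/unlock pair with the same clock the writers use:

/* Hypothetical instrumentation sketch: time one empty read-side pass. */
static u64 rcu_perf_time_one_read(void)
{
	u64 t0, t1;
	int idx;

	t0 = ktime_get_mono_fast_ns();
	idx = cur_ops->readlock();
	cur_ops->readunlock(idx);
	t1 = ktime_get_mono_fast_ns();
	return t1 - t0;		/* nanoseconds for one empty critical section */
}
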
+
+/*
+ * RCU perf writer kthread.  Repeatedly initiates a grace period and
+ * measures its duration.
+ */
+static int
+rcu_perf_writer(void *arg)
+{
+       int i = 0;
+       int i_max;
+       long me = (long)arg;
+       struct sched_param sp;
+       bool started = false, done = false, alldone = false;
+       u64 t;
+       u64 *wdp;
+       u64 *wdpp = writer_durations[me];
+
+       VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
+       WARN_ON(rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp);
+       WARN_ON(rcu_gp_is_normal() && gp_exp);
+       WARN_ON(!wdpp);
+       set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
+       sp.sched_priority = 1;
+       sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+
+       if (holdoff)
+               schedule_timeout_uninterruptible(holdoff * HZ);
+
+       t = ktime_get_mono_fast_ns();
+       if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
+               t_rcu_perf_writer_started = t;
+               if (gp_exp) {
+                       b_rcu_perf_writer_started =
+                               cur_ops->exp_completed() / 2;
+               } else {
+                       b_rcu_perf_writer_started =
+                               cur_ops->completed();
+               }
+       }
+
+       do {
+               wdp = &wdpp[i];
+               *wdp = ktime_get_mono_fast_ns();
+               if (gp_exp) {
+                       rcu_perf_writer_state = RTWS_EXP_SYNC;
+                       cur_ops->exp_sync();
+               } else {
+                       rcu_perf_writer_state = RTWS_SYNC;
+                       cur_ops->sync();
+               }
+               rcu_perf_writer_state = RTWS_IDLE;
+               t = ktime_get_mono_fast_ns();
+               *wdp = t - *wdp;
+               i_max = i;
+               if (!started &&
+                   atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
+                       started = true;
+               if (!done && i >= MIN_MEAS) {
+                       done = true;
+                       sp.sched_priority = 0;
+                       sched_setscheduler_nocheck(current,
+                                                  SCHED_NORMAL, &sp);
+                       pr_alert("%s" PERF_FLAG
+                                "rcu_perf_writer %ld has %d measurements\n",
+                                perf_type, me, MIN_MEAS);
+                       if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
+                           nrealwriters) {
+                               schedule_timeout_interruptible(10);
+                               rcu_ftrace_dump(DUMP_ALL);
+                               PERFOUT_STRING("Test complete");
+                               t_rcu_perf_writer_finished = t;
+                               if (gp_exp) {
+                                       b_rcu_perf_writer_finished =
+                                               cur_ops->exp_completed() / 2;
+                               } else {
+                                       b_rcu_perf_writer_finished =
+                                               cur_ops->completed();
+                               }
+                               if (shutdown) {
+                                       smp_mb(); /* Assign before wake. */
+                                       wake_up(&shutdown_wq);
+                               }
+                       }
+               }
+               if (done && !alldone &&
+                   atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
+                       alldone = true;
+               if (started && !alldone && i < MAX_MEAS - 1)
+                       i++;
+               rcu_perf_wait_shutdown();
+       } while (!torture_must_stop());
+       rcu_perf_writer_state = RTWS_STOPPING;
+       writer_n_durations[me] = i_max;
+       torture_kthread_stopping("rcu_perf_writer");
+       return 0;
+}
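
Each writer leaves its per-grace-period latencies (in nanoseconds) in writer_durations[me] and the sample count in writer_n_durations[me]; the reduction is left to the printout in rcu_perf_cleanup() and to post-processing scripts. As a sketch of what those numbers mean, a mean latency could be computed like this (hypothetical helper, would need <linux/math64.h> for div64_u64()):

/* Hypothetical helper: mean grace-period latency in ns for one writer. */
static u64 rcu_perf_mean_ns(u64 *wdp, int n)
{
	u64 sum = 0;
	int i;

	if (n <= 0)
		return 0;
	for (i = 0; i < n; i++)
		sum += wdp[i];
	return div64_u64(sum, n);
}
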
+
+static inline void
+rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
+{
+       pr_alert("%s" PERF_FLAG
+                "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
+                perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
+}
+
+static void
+rcu_perf_cleanup(void)
+{
+       int i;
+       int j;
+       int ngps = 0;
+       u64 *wdp;
+       u64 *wdpp;
+
+       if (torture_cleanup_begin())
+               return;
+
+       if (reader_tasks) {
+               for (i = 0; i < nrealreaders; i++)
+                       torture_stop_kthread(rcu_perf_reader,
+                                            reader_tasks[i]);
+               kfree(reader_tasks);
+       }
+
+       if (writer_tasks) {
+               for (i = 0; i < nrealwriters; i++) {
+                       torture_stop_kthread(rcu_perf_writer,
+                                            writer_tasks[i]);
+                       if (!writer_n_durations)
+                               continue;
+                       j = writer_n_durations[i];
+                       pr_alert("%s%s writer %d gps: %d\n",
+                                perf_type, PERF_FLAG, i, j);
+                       ngps += j;
+               }
+               pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
+                        perf_type, PERF_FLAG,
+                        t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
+                        t_rcu_perf_writer_finished -
+                        t_rcu_perf_writer_started,
+                        ngps,
+                        b_rcu_perf_writer_finished -
+                        b_rcu_perf_writer_started);
+               for (i = 0; i < nrealwriters; i++) {
+                       if (!writer_durations)
+                               break;
+                       if (!writer_n_durations)
+                               continue;
+                       wdpp = writer_durations[i];
+                       if (!wdpp)
+                               continue;
+                       for (j = 0; j <= writer_n_durations[i]; j++) {
+                               wdp = &wdpp[j];
+                               pr_alert("%s%s %4d writer-duration: %5d %llu\n",
+                                       perf_type, PERF_FLAG,
+                                       i, j, *wdp);
+                               if (j % 100 == 0)
+                                       schedule_timeout_uninterruptible(1);
+                       }
+                       kfree(writer_durations[i]);
+               }
+               kfree(writer_tasks);
+               kfree(writer_durations);
+               kfree(writer_n_durations);
+       }
+
+       /* Do flavor-specific cleanup operations.  */
+       if (cur_ops->cleanup != NULL)
+               cur_ops->cleanup();
+
+       torture_cleanup_end();
+}
+
+/*
+ * If n is non-negative, return n.  If n is -1, return the number of
+ * online CPUs.  If n is less than -1, return num_online_cpus() + 1 + n
+ * (roughly that many fewer than the number of CPUs), but at least one.
+ */
+static int compute_real(int n)
+{
+       int nr;
+
+       if (n >= 0) {
+               nr = n;
+       } else {
+               nr = num_online_cpus() + 1 + n;
+               if (nr <= 0)
+                       nr = 1;
+       }
+       return nr;
+}
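
For example, on an 8-CPU system the default nreaders = nwriters = -1 gives compute_real(-1) = 8 + 1 - 1 = 8 readers and 8 writers, nwriters = -3 gives 8 + 1 - 3 = 6 writers, and anything that would come out at zero or below is clamped to a single kthread.
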
+
+/*
+ * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
+ * down the system.
+ */
+static int
+rcu_perf_shutdown(void *arg)
+{
+       do {
+               wait_event(shutdown_wq,
+                          atomic_read(&n_rcu_perf_writer_finished) >=
+                          nrealwriters);
+       } while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
+       smp_mb(); /* Wake before output. */
+       rcu_perf_cleanup();
+       kernel_power_off();
+       return -EINVAL;
+}
+
+static int __init
+rcu_perf_init(void)
+{
+       long i;
+       int firsterr = 0;
+       static struct rcu_perf_ops *perf_ops[] = {
+               &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
+               RCUPERF_TASKS_OPS
+       };
+
+       if (!torture_init_begin(perf_type, verbose, &perf_runnable))
+               return -EBUSY;
+
+       /* Process args and tell the world that the perf'er is on the job. */
+       for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
+               cur_ops = perf_ops[i];
+               if (strcmp(perf_type, cur_ops->name) == 0)
+                       break;
+       }
+       if (i == ARRAY_SIZE(perf_ops)) {
+               pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
+                        perf_type);
+               pr_alert("rcu-perf types:");
+               for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
+                       pr_alert(" %s", perf_ops[i]->name);
+               pr_alert("\n");
+               firsterr = -EINVAL;
+               goto unwind;
+       }
+       if (cur_ops->init)
+               cur_ops->init();
+
+       nrealwriters = compute_real(nwriters);
+       nrealreaders = compute_real(nreaders);
+       atomic_set(&n_rcu_perf_reader_started, 0);
+       atomic_set(&n_rcu_perf_writer_started, 0);
+       atomic_set(&n_rcu_perf_writer_finished, 0);
+       rcu_perf_print_module_parms(cur_ops, "Start of test");
+
+       /* Start up the kthreads. */
+
+       if (shutdown) {
+               init_waitqueue_head(&shutdown_wq);
+               firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
+                                                 shutdown_task);
+               if (firsterr)
+                       goto unwind;
+               schedule_timeout_uninterruptible(1);
+       }
+       reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
+                              GFP_KERNEL);
+       if (reader_tasks == NULL) {
+               VERBOSE_PERFOUT_ERRSTRING("out of memory");
+               firsterr = -ENOMEM;
+               goto unwind;
+       }
+       for (i = 0; i < nrealreaders; i++) {
+               firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
+                                                 reader_tasks[i]);
+               if (firsterr)
+                       goto unwind;
+       }
+       while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
+               schedule_timeout_uninterruptible(1);
+       writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
+                              GFP_KERNEL);
+       writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
+                                  GFP_KERNEL);
+       writer_n_durations =
+               kcalloc(nrealwriters, sizeof(*writer_n_durations),
+                       GFP_KERNEL);
+       if (!writer_tasks || !writer_durations || !writer_n_durations) {
+               VERBOSE_PERFOUT_ERRSTRING("out of memory");
+               firsterr = -ENOMEM;
+               goto unwind;
+       }
+       for (i = 0; i < nrealwriters; i++) {
+               writer_durations[i] =
+                       kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
+                               GFP_KERNEL);
+               if (!writer_durations[i]) {
+                       firsterr = -ENOMEM;
+                       goto unwind;
+               }
+               firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
+                                                 writer_tasks[i]);
+               if (firsterr)
+                       goto unwind;
+       }
+       torture_init_end();
+       return 0;
+
+unwind:
+       torture_init_end();
+       rcu_perf_cleanup();
+       return firsterr;
+}
+
+module_init(rcu_perf_init);
+module_exit(rcu_perf_cleanup);
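
Since everything above is a module parameter, the facility is typically driven from the kernel command line or modprobe options, for instance something like rcuperf.perf_type=srcu rcuperf.nwriters=4 rcuperf.gp_exp=0 rcuperf.shutdown=1 (the rcuperf. prefix assumes the module name implied by the Makefile change above). When the test is built in, rcuperf.perf_runnable=1 is also needed to start it at boot; built as a module it defaults to running on load, per RCUPERF_RUNNABLE_INIT. The holdoff parameter (default 10 s) then delays measurement until boot-time churn has settled.
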
index 250ea67c1615bf1022056e7a01ef088414cc0c39..084a28a732eb7ec53f3a0c055d6be031bb462941 100644 (file)
@@ -130,8 +130,8 @@ static struct rcu_torture __rcu *rcu_torture_current;
 static unsigned long rcu_torture_current_version;
 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
 static DEFINE_SPINLOCK(rcu_torture_lock);
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) = { 0 };
-static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) = { 0 };
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
+static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
 static atomic_t n_rcu_torture_alloc;
 static atomic_t n_rcu_torture_alloc_fail;
@@ -916,7 +916,7 @@ rcu_torture_fqs(void *arg)
 static int
 rcu_torture_writer(void *arg)
 {
-       bool can_expedite = !rcu_gp_is_expedited();
+       bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
        int expediting = 0;
        unsigned long gp_snap;
        bool gp_cond1 = gp_cond, gp_exp1 = gp_exp, gp_normal1 = gp_normal;
@@ -932,7 +932,7 @@ rcu_torture_writer(void *arg)
        VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
        if (!can_expedite) {
                pr_alert("%s" TORTURE_FLAG
-                        " Grace periods expedited from boot/sysfs for %s,\n",
+                        " GP expediting controlled from boot/sysfs for %s,\n",
                         torture_type, cur_ops->name);
                pr_alert("%s" TORTURE_FLAG
                         " Disabled dynamic grace-period expediting.\n",
@@ -1082,17 +1082,6 @@ rcu_torture_fakewriter(void *arg)
        return 0;
 }
 
-static void rcutorture_trace_dump(void)
-{
-       static atomic_t beenhere = ATOMIC_INIT(0);
-
-       if (atomic_read(&beenhere))
-               return;
-       if (atomic_xchg(&beenhere, 1) != 0)
-               return;
-       ftrace_dump(DUMP_ALL);
-}
-
 /*
  * RCU torture reader from timer handler.  Dereferences rcu_torture_current,
  * incrementing the corresponding element of the pipeline array.  The
@@ -1142,7 +1131,7 @@ static void rcu_torture_timer(unsigned long unused)
        if (pipe_count > 1) {
                do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
                                          started, completed);
-               rcutorture_trace_dump();
+               rcu_ftrace_dump(DUMP_ALL);
        }
        __this_cpu_inc(rcu_torture_count[pipe_count]);
        completed = completed - started;
@@ -1215,7 +1204,7 @@ rcu_torture_reader(void *arg)
                if (pipe_count > 1) {
                        do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
                                                  ts, started, completed);
-                       rcutorture_trace_dump();
+                       rcu_ftrace_dump(DUMP_ALL);
                }
                __this_cpu_inc(rcu_torture_count[pipe_count]);
                completed = completed - started;
@@ -1333,7 +1322,7 @@ rcu_torture_stats_print(void)
                         rcu_torture_writer_state,
                         gpnum, completed, flags);
                show_rcu_gp_kthreads();
-               rcutorture_trace_dump();
+               rcu_ftrace_dump(DUMP_ALL);
        }
        rtcv_snap = rcu_torture_current_version;
 }
@@ -1489,7 +1478,9 @@ static int rcu_torture_barrier_cbs(void *arg)
                 * The above smp_load_acquire() ensures barrier_phase load
                 * is ordered before the following ->call().
                 */
+               local_irq_disable(); /* Just to test no-irq call_rcu(). */
                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
+               local_irq_enable();
                if (atomic_dec_and_test(&barrier_cbs_count))
                        wake_up(&barrier_wq);
        } while (!torture_must_stop());
@@ -1596,7 +1587,7 @@ static int rcutorture_cpu_notify(struct notifier_block *self,
 {
        long cpu = (long)hcpu;
 
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                (void)rcutorture_booster_init(cpu);
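
Masking with ~CPU_TASKS_FROZEN means the *_FROZEN hotplug notifications delivered during suspend/resume are now handled like their normal counterparts; for example, CPU_ONLINE_FROZEN is CPU_ONLINE with the CPU_TASKS_FROZEN bit set, so action & ~CPU_TASKS_FROZEN reduces it to CPU_ONLINE and the boost kthread is set up on resume as well.
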
index 9a535a86e7326b21dce28a002607370df4de4de4..c7f1bc4f817c4a34e19ebc160693a27f034dbac2 100644 (file)
@@ -102,6 +102,8 @@ struct rcu_state sname##_state = { \
        .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
        .name = RCU_STATE_NAME(sname), \
        .abbr = sabbr, \
+       .exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
+       .exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
 }
 
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
@@ -370,6 +372,21 @@ void rcu_all_qs(void)
                rcu_momentary_dyntick_idle();
                local_irq_restore(flags);
        }
+       if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))) {
+               /*
+                * Yes, we just checked a per-CPU variable with preemption
+                * enabled, so we might be migrated to some other CPU at
+                * this point.  That is OK because in that case, the
+                * migration will supply the needed quiescent state.
+                * We might end up needlessly disabling preemption and
+                * invoking rcu_sched_qs() on the destination CPU, but
+                * the probability and cost are both quite low, so this
+                * should not be a problem in practice.
+                */
+               preempt_disable();
+               rcu_sched_qs();
+               preempt_enable();
+       }
        this_cpu_inc(rcu_qs_ctr);
        barrier(); /* Avoid RCU read-side critical sections leaking up. */
 }
@@ -385,9 +402,11 @@ module_param(qlowmark, long, 0444);
 
 static ulong jiffies_till_first_fqs = ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
+static bool rcu_kick_kthreads;
 
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
+module_param(rcu_kick_kthreads, bool, 0644);
 
 /*
  * How long the grace period must be before we start recruiting
@@ -459,6 +478,28 @@ unsigned long rcu_batches_completed_bh(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
 
+/*
+ * Return the number of RCU expedited batches completed thus far for
+ * debug & stats.  Odd numbers mean that a batch is in progress, even
+ * numbers mean idle.  The value returned will thus be roughly double
+ * the cumulative batches since boot.
+ */
+unsigned long rcu_exp_batches_completed(void)
+{
+       return rcu_state_p->expedited_sequence;
+}
+EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);
+
+/*
+ * Return the number of RCU-sched expedited batches completed thus far
+ * for debug & stats.  Similar to rcu_exp_batches_completed().
+ */
+unsigned long rcu_exp_batches_completed_sched(void)
+{
+       return rcu_sched_state.expedited_sequence;
+}
+EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
+
 /*
  * Force a quiescent state.
  */
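
As the comment says, the expedited sequence counter advances by two per grace period, with the low bit indicating a grace period in flight. So if rcu_exp_batches_completed() reads 6 before a measurement interval and 14 after it, (14 - 6) / 2 = 4 expedited grace periods completed in between; this divide-by-two is exactly what the new rcuperf module does with ->exp_completed() when computing its batch counts.
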
@@ -637,7 +678,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
                        idle_task(smp_processor_id());
 
                trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
-               ftrace_dump(DUMP_ORIG);
+               rcu_ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
@@ -799,7 +840,7 @@ static void rcu_eqs_exit_common(long long oldval, int user)
 
                trace_rcu_dyntick(TPS("Error on exit: not idle task"),
                                  oldval, rdtp->dynticks_nesting);
-               ftrace_dump(DUMP_ORIG);
+               rcu_ftrace_dump(DUMP_ORIG);
                WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
                          current->pid, current->comm,
                          idle->pid, idle->comm); /* must be idle task! */
@@ -1224,8 +1265,10 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
                       rsp->gp_flags,
                       gp_state_getname(rsp->gp_state), rsp->gp_state,
                       rsp->gp_kthread ? rsp->gp_kthread->state : ~0);
-               if (rsp->gp_kthread)
+               if (rsp->gp_kthread) {
                        sched_show_task(rsp->gp_kthread);
+                       wake_up_process(rsp->gp_kthread);
+               }
        }
 }
 
@@ -1249,6 +1292,25 @@ static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
        }
 }
 
+/*
+ * If too much time has passed in the current grace period, and if
+ * so configured, go kick the relevant kthreads.
+ */
+static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
+{
+       unsigned long j;
+
+       if (!rcu_kick_kthreads)
+               return;
+       j = READ_ONCE(rsp->jiffies_kick_kthreads);
+       if (time_after(jiffies, j) && rsp->gp_kthread) {
+               WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
+               rcu_ftrace_dump(DUMP_ALL);
+               wake_up_process(rsp->gp_kthread);
+               WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
+       }
+}
+
 static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 {
        int cpu;
@@ -1260,6 +1322,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;
 
+       /* Kick and suppress, if so configured. */
+       rcu_stall_kick_kthreads(rsp);
+       if (rcu_cpu_stall_suppress)
+               return;
+
        /* Only let one CPU complain about others per time interval. */
 
        raw_spin_lock_irqsave_rcu_node(rnp, flags);
@@ -1333,6 +1400,11 @@ static void print_cpu_stall(struct rcu_state *rsp)
        struct rcu_node *rnp = rcu_get_root(rsp);
        long totqlen = 0;
 
+       /* Kick and suppress, if so configured. */
+       rcu_stall_kick_kthreads(rsp);
+       if (rcu_cpu_stall_suppress)
+               return;
+
        /*
         * OK, time to rat on ourselves...
         * See Documentation/RCU/stallwarn.txt for info on how to debug
@@ -1377,8 +1449,10 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
        unsigned long js;
        struct rcu_node *rnp;
 
-       if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
+       if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
+           !rcu_gp_in_progress(rsp))
                return;
+       rcu_stall_kick_kthreads(rsp);
        j = jiffies;
 
        /*
@@ -2117,8 +2191,11 @@ static int __noreturn rcu_gp_kthread(void *arg)
                }
                ret = 0;
                for (;;) {
-                       if (!ret)
+                       if (!ret) {
                                rsp->jiffies_force_qs = jiffies + j;
+                               WRITE_ONCE(rsp->jiffies_kick_kthreads,
+                                          jiffies + 3 * j);
+                       }
                        trace_rcu_grace_period(rsp->name,
                                               READ_ONCE(rsp->gpnum),
                                               TPS("fqswait"));
@@ -2144,6 +2221,15 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                                       TPS("fqsend"));
                                cond_resched_rcu_qs();
                                WRITE_ONCE(rsp->gp_activity, jiffies);
+                               ret = 0; /* Force full wait till next FQS. */
+                               j = jiffies_till_next_fqs;
+                               if (j > HZ) {
+                                       j = HZ;
+                                       jiffies_till_next_fqs = HZ;
+                               } else if (j < 1) {
+                                       j = 1;
+                                       jiffies_till_next_fqs = 1;
+                               }
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_rcu_qs();
@@ -2152,14 +2238,12 @@ static int __noreturn rcu_gp_kthread(void *arg)
                                trace_rcu_grace_period(rsp->name,
                                                       READ_ONCE(rsp->gpnum),
                                                       TPS("fqswaitsig"));
-                       }
-                       j = jiffies_till_next_fqs;
-                       if (j > HZ) {
-                               j = HZ;
-                               jiffies_till_next_fqs = HZ;
-                       } else if (j < 1) {
-                               j = 1;
-                               jiffies_till_next_fqs = 1;
+                               ret = 1; /* Keep old FQS timing. */
+                               j = jiffies;
+                               if (time_after(jiffies, rsp->jiffies_force_qs))
+                                       j = 1;
+                               else
+                                       j = rsp->jiffies_force_qs - j;
                        }
                }
 
@@ -3376,8 +3460,12 @@ static void rcu_exp_gp_seq_end(struct rcu_state *rsp)
 }
 static unsigned long rcu_exp_gp_seq_snap(struct rcu_state *rsp)
 {
+       unsigned long s;
+
        smp_mb(); /* Caller's modifications seen first by other CPUs. */
-       return rcu_seq_snap(&rsp->expedited_sequence);
+       s = rcu_seq_snap(&rsp->expedited_sequence);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("snap"));
+       return s;
 }
 static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 {
@@ -3469,7 +3557,7 @@ static void __maybe_unused sync_exp_reset_tree(struct rcu_state *rsp)
  * for the current expedited grace period.  Works only for preemptible
  * RCU -- other RCU implementation use other means.
  *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
+ * Caller must hold the rcu_state's exp_mutex.
  */
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
@@ -3485,8 +3573,8 @@ static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
  * recursively up the tree.  (Calm down, calm down, we do the recursion
  * iteratively!)
  *
- * Caller must hold the root rcu_node's exp_funnel_mutex and the
- * specified rcu_node structure's ->lock.
+ * Caller must hold the rcu_state's exp_mutex and the specified rcu_node
+ * structure's ->lock.
  */
 static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
                                 bool wake, unsigned long flags)
@@ -3523,7 +3611,7 @@ static void __rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
  * Report expedited quiescent state for specified node.  This is a
  * lock-acquisition wrapper function for __rcu_report_exp_rnp().
  *
- * Caller must hold the root rcu_node's exp_funnel_mutex.
+ * Caller must hold the rcu_state's exp_mutex.
  */
 static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
                                              struct rcu_node *rnp, bool wake)
@@ -3536,8 +3624,8 @@ static void __maybe_unused rcu_report_exp_rnp(struct rcu_state *rsp,
 
 /*
  * Report expedited quiescent state for multiple CPUs, all covered by the
- * specified leaf rcu_node structure.  Caller must hold the root
- * rcu_node's exp_funnel_mutex.
+ * specified leaf rcu_node structure.  Caller must hold the rcu_state's
+ * exp_mutex.
  */
 static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
                                    unsigned long mask, bool wake)
@@ -3555,7 +3643,6 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 
 /*
  * Report expedited quiescent state for specified rcu_data (CPU).
- * Caller must hold the root rcu_node's exp_funnel_mutex.
  */
 static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
                               bool wake)
@@ -3564,15 +3651,11 @@ static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
 }
 
 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
-static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
-                              struct rcu_data *rdp,
-                              atomic_long_t *stat, unsigned long s)
+static bool sync_exp_work_done(struct rcu_state *rsp, atomic_long_t *stat,
+                              unsigned long s)
 {
        if (rcu_exp_gp_seq_done(rsp, s)) {
-               if (rnp)
-                       mutex_unlock(&rnp->exp_funnel_mutex);
-               else if (rdp)
-                       mutex_unlock(&rdp->exp_funnel_mutex);
+               trace_rcu_exp_grace_period(rsp->name, s, TPS("done"));
                /* Ensure test happens before caller kfree(). */
                smp_mb__before_atomic(); /* ^^^ */
                atomic_long_inc(stat);
@@ -3582,59 +3665,65 @@ static bool sync_exp_work_done(struct rcu_state *rsp, struct rcu_node *rnp,
 }
 
 /*
- * Funnel-lock acquisition for expedited grace periods.  Returns a
- * pointer to the root rcu_node structure, or NULL if some other
- * task did the expedited grace period for us.
+ * Funnel-lock acquisition for expedited grace periods.  Returns true
+ * if some other task completed an expedited grace period that this task
+ * can piggy-back on, and with no mutex held.  Otherwise, returns false
+ * with the mutex held, indicating that the caller must actually do the
+ * expedited grace period.
  */
-static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 {
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
-       struct rcu_node *rnp0;
-       struct rcu_node *rnp1 = NULL;
+       struct rcu_node *rnp = rdp->mynode;
+       struct rcu_node *rnp_root = rcu_get_root(rsp);
+
+       /* Low-contention fastpath. */
+       if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s) &&
+           (rnp == rnp_root ||
+            ULONG_CMP_LT(READ_ONCE(rnp_root->exp_seq_rq), s)) &&
+           !mutex_is_locked(&rsp->exp_mutex) &&
+           mutex_trylock(&rsp->exp_mutex))
+               goto fastpath;
 
        /*
-        * First try directly acquiring the root lock in order to reduce
-        * latency in the common case where expedited grace periods are
-        * rare.  We check mutex_is_locked() to avoid pathological levels of
-        * memory contention on ->exp_funnel_mutex in the heavy-load case.
+        * Each pass through the following loop works its way up
+        * the rcu_node tree, returning if others have done the work, or
+        * otherwise falling through to acquire rsp->exp_mutex.  The mapping
+        * from CPU to rcu_node structure can be inexact, as it is just
+        * promoting locality and is not strictly needed for correctness.
         */
-       rnp0 = rcu_get_root(rsp);
-       if (!mutex_is_locked(&rnp0->exp_funnel_mutex)) {
-               if (mutex_trylock(&rnp0->exp_funnel_mutex)) {
-                       if (sync_exp_work_done(rsp, rnp0, NULL,
-                                              &rdp->expedited_workdone0, s))
-                               return NULL;
-                       return rnp0;
+       for (; rnp != NULL; rnp = rnp->parent) {
+               if (sync_exp_work_done(rsp, &rdp->exp_workdone1, s))
+                       return true;
+
+               /* Work not done, either wait here or go up. */
+               spin_lock(&rnp->exp_lock);
+               if (ULONG_CMP_GE(rnp->exp_seq_rq, s)) {
+
+                       /* Someone else doing GP, so wait for them. */
+                       spin_unlock(&rnp->exp_lock);
+                       trace_rcu_exp_funnel_lock(rsp->name, rnp->level,
+                                                 rnp->grplo, rnp->grphi,
+                                                 TPS("wait"));
+                       wait_event(rnp->exp_wq[(s >> 1) & 0x3],
+                                  sync_exp_work_done(rsp,
+                                                     &rdp->exp_workdone2, s));
+                       return true;
                }
+               rnp->exp_seq_rq = s; /* Followers can wait on us. */
+               spin_unlock(&rnp->exp_lock);
+               trace_rcu_exp_funnel_lock(rsp->name, rnp->level, rnp->grplo,
+                                         rnp->grphi, TPS("nxtlvl"));
        }
-
-       /*
-        * Each pass through the following loop works its way
-        * up the rcu_node tree, returning if others have done the
-        * work or otherwise falls through holding the root rnp's
-        * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
-        * can be inexact, as it is just promoting locality and is not
-        * strictly needed for correctness.
-        */
-       if (sync_exp_work_done(rsp, NULL, NULL, &rdp->expedited_workdone1, s))
-               return NULL;
-       mutex_lock(&rdp->exp_funnel_mutex);
-       rnp0 = rdp->mynode;
-       for (; rnp0 != NULL; rnp0 = rnp0->parent) {
-               if (sync_exp_work_done(rsp, rnp1, rdp,
-                                      &rdp->expedited_workdone2, s))
-                       return NULL;
-               mutex_lock(&rnp0->exp_funnel_mutex);
-               if (rnp1)
-                       mutex_unlock(&rnp1->exp_funnel_mutex);
-               else
-                       mutex_unlock(&rdp->exp_funnel_mutex);
-               rnp1 = rnp0;
+       mutex_lock(&rsp->exp_mutex);
+fastpath:
+       if (sync_exp_work_done(rsp, &rdp->exp_workdone3, s)) {
+               mutex_unlock(&rsp->exp_mutex);
+               return true;
        }
-       if (sync_exp_work_done(rsp, rnp1, rdp,
-                              &rdp->expedited_workdone3, s))
-               return NULL;
-       return rnp1;
+       rcu_exp_gp_seq_start(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("start"));
+       return false;
 }
 
 /* Invoked on each online non-idle CPU for expedited quiescent state. */
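
The wait-queue index (s >> 1) & 0x3 follows directly from that sequence encoding: the low bit distinguishes in-progress from idle, so s >> 1 counts grace periods and the & 0x3 spreads consecutive grace periods across the four exp_wq[] entries of each rcu_node structure. For a snapshot of s = 0x2a, for instance, s >> 1 = 21 and 21 & 0x3 = 1, so piggybacking waiters sleep on exp_wq[1], which is the queue rcu_exp_wait_wake() wakes once the sequence reaches s.
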
@@ -3649,6 +3738,11 @@ static void sync_sched_exp_handler(void *data)
        if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
            __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
                return;
+       if (rcu_is_cpu_rrupt_from_idle()) {
+               rcu_report_exp_rdp(&rcu_sched_state,
+                                  this_cpu_ptr(&rcu_sched_data), true);
+               return;
+       }
        __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
        resched_cpu(smp_processor_id());
 }
@@ -3773,7 +3867,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
                       rsp->name);
                ndetected = 0;
                rcu_for_each_leaf_node(rsp, rnp) {
-                       ndetected = rcu_print_task_exp_stall(rnp);
+                       ndetected += rcu_print_task_exp_stall(rnp);
                        mask = 1;
                        for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask <<= 1) {
                                struct rcu_data *rdp;
@@ -3783,7 +3877,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
                                ndetected++;
                                rdp = per_cpu_ptr(rsp->rda, cpu);
                                pr_cont(" %d-%c%c%c", cpu,
-                                       "O."[cpu_online(cpu)],
+                                       "O."[!!cpu_online(cpu)],
                                        "o."[!!(rdp->grpmask & rnp->expmaskinit)],
                                        "N."[!!(rdp->grpmask & rnp->expmaskinitnext)]);
                        }
@@ -3792,7 +3886,7 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
                pr_cont(" } %lu jiffies s: %lu root: %#lx/%c\n",
                        jiffies - jiffies_start, rsp->expedited_sequence,
                        rnp_root->expmask, ".T"[!!rnp_root->exp_tasks]);
-               if (!ndetected) {
+               if (ndetected) {
                        pr_err("blocking rcu_node structures:");
                        rcu_for_each_node_breadth_first(rsp, rnp) {
                                if (rnp == rnp_root)
@@ -3818,6 +3912,41 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
        }
 }
 
+/*
+ * Wait for the current expedited grace period to complete, and then
+ * wake up everyone who piggybacked on the just-completed expedited
+ * grace period.  Also update all the ->exp_seq_rq counters as needed
+ * in order to avoid counter-wrap problems.
+ */
+static void rcu_exp_wait_wake(struct rcu_state *rsp, unsigned long s)
+{
+       struct rcu_node *rnp;
+
+       synchronize_sched_expedited_wait(rsp);
+       rcu_exp_gp_seq_end(rsp);
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("end"));
+
+       /*
+        * Switch over to wakeup mode, allowing the next GP, but -only- the
+        * next GP, to proceed.
+        */
+       mutex_lock(&rsp->exp_wake_mutex);
+       mutex_unlock(&rsp->exp_mutex);
+
+       rcu_for_each_node_breadth_first(rsp, rnp) {
+               if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
+                       spin_lock(&rnp->exp_lock);
+                       /* Recheck, avoid hang in case someone just arrived. */
+                       if (ULONG_CMP_LT(rnp->exp_seq_rq, s))
+                               rnp->exp_seq_rq = s;
+                       spin_unlock(&rnp->exp_lock);
+               }
+               wake_up_all(&rnp->exp_wq[(rsp->expedited_sequence >> 1) & 0x3]);
+       }
+       trace_rcu_exp_grace_period(rsp->name, s, TPS("endwake"));
+       mutex_unlock(&rsp->exp_wake_mutex);
+}
+
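
The exp_mutex/exp_wake_mutex pairing above forms a short pipeline: the grace period itself is serialized by exp_mutex, but the wakeup phase is pinned by exp_wake_mutex before exp_mutex is dropped, so the next expedited grace period may start immediately while the one after that cannot disturb the wait queues still being drained. A stripped-down, hypothetical rendering of the same hand-off:

#include <linux/mutex.h>
#include <linux/wait.h>

static DEFINE_MUTEX(gp_mutex);		/* serializes the grace period itself */
static DEFINE_MUTEX(wake_mutex);	/* serializes the wakeup phase */
static DECLARE_WAIT_QUEUE_HEAD(gp_wq);	/* stand-in for the per-node exp_wq[] */

/* Condensed sketch of the hand-off: the next GP may start as soon as
 * gp_mutex drops, but it cannot overtake the wakeups under wake_mutex. */
static void run_one_expedited_gp(void (*gp)(void))
{
	mutex_lock(&gp_mutex);
	gp();
	mutex_lock(&wake_mutex);
	mutex_unlock(&gp_mutex);
	wake_up_all(&gp_wq);
	mutex_unlock(&wake_mutex);
}
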
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -3837,7 +3966,6 @@ static void synchronize_sched_expedited_wait(struct rcu_state *rsp)
 void synchronize_sched_expedited(void)
 {
        unsigned long s;
-       struct rcu_node *rnp;
        struct rcu_state *rsp = &rcu_sched_state;
 
        /* If only one CPU, this is automatically a grace period. */
@@ -3852,17 +3980,14 @@ void synchronize_sched_expedited(void)
 
        /* Take a snapshot of the sequence number.  */
        s = rcu_exp_gp_seq_snap(rsp);
-
-       rnp = exp_funnel_lock(rsp, s);
-       if (rnp == NULL)
+       if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */
 
-       rcu_exp_gp_seq_start(rsp);
+       /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, sync_sched_exp_handler);
-       synchronize_sched_expedited_wait(rsp);
 
-       rcu_exp_gp_seq_end(rsp);
-       mutex_unlock(&rnp->exp_funnel_mutex);
+       /* Wait and clean up, including waking everyone. */
+       rcu_exp_wait_wake(rsp, s);
 }
 EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
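
For callers nothing changes: the function remains a drop-in, heavier-weight substitute for synchronize_sched(). A typical update-side pattern, with struct foo and foo_del() invented purely for illustration:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {				/* hypothetical RCU-sched-protected item */
	struct list_head list;
	int data;
};

/* Unlink an element, wait (expedited) until all preemption-disabled
 * readers that might still see it have finished, then free it. */
static void foo_del(struct foo *p)
{
	list_del_rcu(&p->list);
	synchronize_sched_expedited();
	kfree(p);
}
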
 
@@ -4162,7 +4287,6 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
        WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
        rdp->cpu = cpu;
        rdp->rsp = rsp;
-       mutex_init(&rdp->exp_funnel_mutex);
        rcu_boot_init_nocb_percpu_data(rdp);
        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 }
@@ -4420,10 +4544,8 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 {
        static const char * const buf[] = RCU_NODE_NAME_INIT;
        static const char * const fqs[] = RCU_FQS_NAME_INIT;
-       static const char * const exp[] = RCU_EXP_NAME_INIT;
        static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
        static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
-       static struct lock_class_key rcu_exp_class[RCU_NUM_LVLS];
        static u8 fl_mask = 0x1;
 
        int levelcnt[RCU_NUM_LVLS];             /* # nodes in each level. */
@@ -4482,9 +4604,11 @@ static void __init rcu_init_one(struct rcu_state *rsp)
                        rnp->level = i;
                        INIT_LIST_HEAD(&rnp->blkd_tasks);
                        rcu_init_one_nocb(rnp);
-                       mutex_init(&rnp->exp_funnel_mutex);
-                       lockdep_set_class_and_name(&rnp->exp_funnel_mutex,
-                                                  &rcu_exp_class[i], exp[i]);
+                       init_waitqueue_head(&rnp->exp_wq[0]);
+                       init_waitqueue_head(&rnp->exp_wq[1]);
+                       init_waitqueue_head(&rnp->exp_wq[2]);
+                       init_waitqueue_head(&rnp->exp_wq[3]);
+                       spin_lock_init(&rnp->exp_lock);
                }
        }
 
index df668c0f9e64991346dd94a872ee67e16e97d735..e3959f5e6ddf730e07b433acd3eefc2707387996 100644 (file)
@@ -70,7 +70,6 @@
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0" }
 #elif NR_CPUS <= RCU_FANOUT_2
 #  define RCU_NUM_LVLS       2
 #  define NUM_RCU_LVL_0              1
@@ -79,7 +78,6 @@
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1" }
 #elif NR_CPUS <= RCU_FANOUT_3
 #  define RCU_NUM_LVLS       3
 #  define NUM_RCU_LVL_0              1
@@ -89,7 +87,6 @@
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2" }
 #elif NR_CPUS <= RCU_FANOUT_4
 #  define RCU_NUM_LVLS       4
 #  define NUM_RCU_LVL_0              1
 #  define NUM_RCU_LVL_INIT    { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
 #  define RCU_NODE_NAME_INIT  { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
 #  define RCU_FQS_NAME_INIT   { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
-#  define RCU_EXP_NAME_INIT   { "rcu_node_exp_0", "rcu_node_exp_1", "rcu_node_exp_2", "rcu_node_exp_3" }
 #else
 # error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
 #endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
@@ -252,7 +248,9 @@ struct rcu_node {
                                /* Counts of upcoming no-CB GP requests. */
        raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
 
-       struct mutex exp_funnel_mutex ____cacheline_internodealigned_in_smp;
+       spinlock_t exp_lock ____cacheline_internodealigned_in_smp;
+       unsigned long exp_seq_rq;
+       wait_queue_head_t exp_wq[4];
 } ____cacheline_internodealigned_in_smp;
 
 /*
@@ -387,11 +385,9 @@ struct rcu_data {
 #ifdef CONFIG_RCU_FAST_NO_HZ
        struct rcu_head oom_head;
 #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
-       struct mutex exp_funnel_mutex;
-       atomic_long_t expedited_workdone0;      /* # done by others #0. */
-       atomic_long_t expedited_workdone1;      /* # done by others #1. */
-       atomic_long_t expedited_workdone2;      /* # done by others #2. */
-       atomic_long_t expedited_workdone3;      /* # done by others #3. */
+       atomic_long_t exp_workdone1;    /* # done by others #1. */
+       atomic_long_t exp_workdone2;    /* # done by others #2. */
+       atomic_long_t exp_workdone3;    /* # done by others #3. */
 
        /* 7) Callback offloading. */
 #ifdef CONFIG_RCU_NOCB_CPU
@@ -505,6 +501,8 @@ struct rcu_state {
                                                /*  _rcu_barrier(). */
        /* End of fields guarded by barrier_mutex. */
 
+       struct mutex exp_mutex;                 /* Serialize expedited GP. */
+       struct mutex exp_wake_mutex;            /* Serialize wakeup. */
        unsigned long expedited_sequence;       /* Take a ticket. */
        atomic_long_t expedited_normal;         /* # fallbacks to normal. */
        atomic_t expedited_need_qs;             /* # CPUs left to check in. */
@@ -513,6 +511,8 @@ struct rcu_state {
 
        unsigned long jiffies_force_qs;         /* Time at which to invoke */
                                                /*  force_quiescent_state(). */
+       unsigned long jiffies_kick_kthreads;    /* Time at which to kick */
+                                               /*  kthreads, if configured. */
        unsigned long n_force_qs;               /* Number of calls to */
                                                /*  force_quiescent_state(). */
        unsigned long n_force_qs_lh;            /* ~Number of calls leaving */
index efdf7b61ce120d6acccbb725db87e74fed097b91..ff1cd4e1188d37784ea83bdb700de183531aa7bd 100644 (file)
@@ -722,18 +722,22 @@ static void sync_rcu_exp_handler(void *info)
  * synchronize_rcu_expedited - Brute-force RCU grace period
  *
  * Wait for an RCU-preempt grace period, but expedite it.  The basic
- * idea is to invoke synchronize_sched_expedited() to push all the tasks to
- * the ->blkd_tasks lists and wait for this list to drain.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.
- * In fact, if you are using synchronize_rcu_expedited() in a loop,
- * please restructure your code to batch your updates, and then Use a
- * single synchronize_rcu() instead.
+ * idea is to IPI all non-idle non-nohz online CPUs.  The IPI handler
+ * checks whether the CPU is in an RCU-preempt critical section, and
+ * if so, it sets a flag that causes the outermost rcu_read_unlock()
+ * to report the quiescent state.  On the other hand, if the CPU is
+ * not in an RCU read-side critical section, the IPI handler reports
+ * the quiescent state immediately.
+ *
+ * Although this is a great improvement over previous expedited
+ * implementations, it is still unfriendly to real-time workloads, and
+ * is thus not recommended for any sort of common-case code.  In fact,
+ * if you are using synchronize_rcu_expedited() in a loop, please
+ * restructure your code to batch your updates, and then use a single
+ * synchronize_rcu() instead.
  */
 void synchronize_rcu_expedited(void)
 {
-       struct rcu_node *rnp;
-       struct rcu_node *rnp_unlock;
        struct rcu_state *rsp = rcu_state_p;
        unsigned long s;
 
@@ -744,23 +748,14 @@ void synchronize_rcu_expedited(void)
        }
 
        s = rcu_exp_gp_seq_snap(rsp);
-
-       rnp_unlock = exp_funnel_lock(rsp, s);
-       if (rnp_unlock == NULL)
+       if (exp_funnel_lock(rsp, s))
                return;  /* Someone else did our work for us. */
 
-       rcu_exp_gp_seq_start(rsp);
-
        /* Initialize the rcu_node tree in preparation for the wait. */
        sync_rcu_exp_select_cpus(rsp, sync_rcu_exp_handler);
 
-       /* Wait for snapshotted ->blkd_tasks lists to drain. */
-       rnp = rcu_get_root(rsp);
-       synchronize_sched_expedited_wait(rsp);
-
-       /* Clean up and exit. */
-       rcu_exp_gp_seq_end(rsp);
-       mutex_unlock(&rnp_unlock->exp_funnel_mutex);
+       /* Wait for ->blkd_tasks lists to drain, then wake everyone up. */
+       rcu_exp_wait_wake(rsp, s);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
index 1088e64f01ad84f98143b95c549bf77ad9c655ab..86782f9a460432614725f0e234785a6b49d9f0db 100644 (file)
@@ -185,17 +185,16 @@ static int show_rcuexp(struct seq_file *m, void *v)
        int cpu;
        struct rcu_state *rsp = (struct rcu_state *)m->private;
        struct rcu_data *rdp;
-       unsigned long s0 = 0, s1 = 0, s2 = 0, s3 = 0;
+       unsigned long s1 = 0, s2 = 0, s3 = 0;
 
        for_each_possible_cpu(cpu) {
                rdp = per_cpu_ptr(rsp->rda, cpu);
-               s0 += atomic_long_read(&rdp->expedited_workdone0);
-               s1 += atomic_long_read(&rdp->expedited_workdone1);
-               s2 += atomic_long_read(&rdp->expedited_workdone2);
-               s3 += atomic_long_read(&rdp->expedited_workdone3);
+               s1 += atomic_long_read(&rdp->exp_workdone1);
+               s2 += atomic_long_read(&rdp->exp_workdone2);
+               s3 += atomic_long_read(&rdp->exp_workdone3);
        }
-       seq_printf(m, "s=%lu wd0=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
-                  rsp->expedited_sequence, s0, s1, s2, s3,
+       seq_printf(m, "s=%lu wd1=%lu wd2=%lu wd3=%lu n=%lu enq=%d sc=%lu\n",
+                  rsp->expedited_sequence, s1, s2, s3,
                   atomic_long_read(&rsp->expedited_normal),
                   atomic_read(&rsp->expedited_need_qs),
                   rsp->expedited_sequence / 2);
index ca828b41c938b24e5b11b33e421ea4b4e2366f1f..3ccdc8eebc5afff02707b7b20772e8b4f978486c 100644 (file)
@@ -67,7 +67,7 @@ static int rcu_normal_after_boot;
 module_param(rcu_normal_after_boot, int, 0);
 #endif /* #ifndef CONFIG_TINY_RCU */
 
-#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_PREEMPT_COUNT)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
 /**
  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
  *
@@ -111,7 +111,7 @@ int rcu_read_lock_sched_held(void)
                return 0;
        if (debug_locks)
                lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-       return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
+       return lockdep_opinion || !preemptible();
 }
 EXPORT_SYMBOL(rcu_read_lock_sched_held);
 #endif
index 8b489fcac37bd9d829439feb08d8bac1354c7e71..d1f7149f870439d65b9cfcfbc27d4160bbb1672f 100644 (file)
@@ -596,17 +596,8 @@ bool sched_can_stop_tick(struct rq *rq)
                return false;
 
        /*
-        * FIFO realtime policy runs the highest priority task (after DEADLINE).
-        * Other runnable tasks are of a lower priority. The scheduler tick
-        * isn't needed.
-        */
-       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
-       if (fifo_nr_running)
-               return true;
-
-       /*
-        * Round-robin realtime tasks time slice with other tasks at the same
-        * realtime priority.
+        * If there is more than one RR task, we need the tick to effect the
+        * actual RR behaviour.
         */
        if (rq->rt.rr_nr_running) {
                if (rq->rt.rr_nr_running == 1)
@@ -615,8 +606,20 @@ bool sched_can_stop_tick(struct rq *rq)
                        return false;
        }
 
-       /* Normal multitasking need periodic preemption checks */
-       if (rq->cfs.nr_running > 1)
+       /*
+        * If there are no RR tasks but there are FIFO tasks, we can skip the
+        * tick: there is no forced preemption between FIFO tasks.
+        */
+       fifo_nr_running = rq->rt.rt_nr_running - rq->rt.rr_nr_running;
+       if (fifo_nr_running)
+               return true;
+
+       /*
+        * If there are no DL, RR or FIFO tasks, only CFS tasks can be left;
+        * if there's more than one we need the tick for involuntary
+        * preemption.
+        */
+       if (rq->nr_running > 1)
                return false;
 
        return true;
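Restated as a standalone sketch for readability, since the diff splits the new ordering across two hunks. All names below are invented, the rq fields become plain parameters, and the checks earlier in sched_can_stop_tick() (not shown in the hunk) are omitted.

#include <stdbool.h>

static bool can_stop_tick_sketch(unsigned int rr_nr_running,
                                 unsigned int rt_nr_running,
                                 unsigned int nr_running)
{
        /* One RR task can run without the tick; two or more need it to
         * rotate their time slices. */
        if (rr_nr_running)
                return rr_nr_running == 1;

        /* FIFO tasks but no RR tasks: no forced preemption, so the tick
         * can be stopped. */
        if (rt_nr_running - rr_nr_running)
                return true;

        /* Only CFS tasks remain; more than one runnable task needs the
         * tick for involuntary preemption. */
        if (nr_running > 1)
                return false;

        return true;
}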
index affd97ec9f65a0c1b9751ee9f1d39cf77757b0aa..686ec8adf952fbd3767c652515089d92b3827ae0 100644 (file)
@@ -1394,6 +1394,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
                                     !cpumask_test_cpu(later_rq->cpu,
                                                       &task->cpus_allowed) ||
                                     task_running(rq, task) ||
+                                    !dl_task(task) ||
                                     !task_on_rq_queued(task))) {
                                double_unlock_balance(rq, later_rq);
                                later_rq = NULL;
index 0fe30e66aff1db44d58ec96cbee332a78257e4d3..e7dd0ec169bea82c630e2b8d897d2aee0cc9571b 100644 (file)
@@ -3030,7 +3030,14 @@ static int idle_balance(struct rq *this_rq);
 
 #else /* CONFIG_SMP */
 
-static inline void update_load_avg(struct sched_entity *se, int update_tg) {}
+static inline void update_load_avg(struct sched_entity *se, int not_used)
+{
+       struct cfs_rq *cfs_rq = cfs_rq_of(se);
+       struct rq *rq = rq_of(cfs_rq);
+
+       cpufreq_trigger_update(rq_clock(rq));
+}
+
 static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
@@ -3181,25 +3188,17 @@ static inline void check_schedstat_required(void)
 static void
 enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
 {
-       bool renorm = !(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING);
-       bool curr = cfs_rq->curr == se;
-
        /*
-        * If we're the current task, we must renormalise before calling
-        * update_curr().
+        * Update the normalized vruntime before updating min_vruntime
+        * through calling update_curr().
         */
-       if (renorm && curr)
+       if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
                se->vruntime += cfs_rq->min_vruntime;
 
-       update_curr(cfs_rq);
-
        /*
-        * Otherwise, renormalise after, such that we're placed at the current
-        * moment in time, instead of some random moment in the past.
+        * Update run-time statistics of the 'current'.
         */
-       if (renorm && !curr)
-               se->vruntime += cfs_rq->min_vruntime;
-
+       update_curr(cfs_rq);
        enqueue_entity_load_avg(cfs_rq, se);
        account_entity_enqueue(cfs_rq, se);
        update_cfs_shares(cfs_rq);
@@ -3215,7 +3214,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
                update_stats_enqueue(cfs_rq, se);
                check_spread(cfs_rq, se);
        }
-       if (!curr)
+       if (se != cfs_rq->curr)
                __enqueue_entity(cfs_rq, se);
        se->on_rq = 1;
 
index c41ea7ac1764b831fd015531c3d8f40d445bf8ae..ec4f538d4396beb20e2656923995438d5093f667 100644 (file)
@@ -1729,6 +1729,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
                                     !cpumask_test_cpu(lowest_rq->cpu,
                                                       tsk_cpus_allowed(task)) ||
                                     task_running(rq, task) ||
+                                    !rt_task(task) ||
                                     !task_on_rq_queued(task))) {
 
                                double_unlock_balance(rq, lowest_rq);
index aa9bf00749c151b70026a78111149f61f1a99727..ab122a2cee417e806d002fb8eed27756b1e89dcf 100644 (file)
@@ -3099,12 +3099,14 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 
        oss.ss_sp = (void __user *) current->sas_ss_sp;
        oss.ss_size = current->sas_ss_size;
-       oss.ss_flags = sas_ss_flags(sp);
+       oss.ss_flags = sas_ss_flags(sp) |
+               (current->sas_ss_flags & SS_FLAG_BITS);
 
        if (uss) {
                void __user *ss_sp;
                size_t ss_size;
-               int ss_flags;
+               unsigned ss_flags;
+               int ss_mode;
 
                error = -EFAULT;
                if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
@@ -3119,18 +3121,13 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
                if (on_sig_stack(sp))
                        goto out;
 
+               ss_mode = ss_flags & ~SS_FLAG_BITS;
                error = -EINVAL;
-               /*
-                * Note - this code used to test ss_flags incorrectly:
-                *        old code may have been written using ss_flags==0
-                *        to mean ss_flags==SS_ONSTACK (as this was the only
-                *        way that worked) - this fix preserves that older
-                *        mechanism.
-                */
-               if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
+               if (ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
+                               ss_mode != 0)
                        goto out;
 
-               if (ss_flags == SS_DISABLE) {
+               if (ss_mode == SS_DISABLE) {
                        ss_size = 0;
                        ss_sp = NULL;
                } else {
@@ -3141,6 +3138,7 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 
                current->sas_ss_sp = (unsigned long) ss_sp;
                current->sas_ss_size = ss_size;
+               current->sas_ss_flags = ss_flags;
        }
 
        error = 0;
@@ -3171,9 +3169,14 @@ int restore_altstack(const stack_t __user *uss)
 int __save_altstack(stack_t __user *uss, unsigned long sp)
 {
        struct task_struct *t = current;
-       return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
-               __put_user(sas_ss_flags(sp), &uss->ss_flags) |
+       int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
+               __put_user(t->sas_ss_flags, &uss->ss_flags) |
                __put_user(t->sas_ss_size, &uss->ss_size);
+       if (err)
+               return err;
+       if (t->sas_ss_flags & SS_AUTODISARM)
+               sas_ss_reset(t);
+       return 0;
 }
 
 #ifdef CONFIG_COMPAT
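A userspace illustration of the new flag handling above. The fallback definition of SS_AUTODISARM mirrors the uapi value this series adds and the stack size is arbitrary, so treat both as assumptions; kernels without this support reject the unknown flag bit with EINVAL via the ss_mode check above.

#include <signal.h>
#include <stdlib.h>

#ifndef SS_AUTODISARM                    /* assumed fallback for older headers */
#define SS_AUTODISARM   (1U << 31)
#endif

static char altstack[64 * 1024];

/* Install an alternate signal stack whose settings the kernel clears while
 * a handler runs on it (the sas_ss_reset() call above) and restores on
 * return from the handler (handled elsewhere in this series). */
static void install_autodisarm_stack(void)
{
        stack_t ss = {
                .ss_sp    = altstack,
                .ss_size  = sizeof(altstack),
                .ss_flags = SS_AUTODISARM,
        };

        if (sigaltstack(&ss, NULL) != 0)
                abort();
}

int main(void)
{
        install_autodisarm_stack();
        return 0;
}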
index 44aa462d033f700a86e1439d1b24053daca4094a..fa0bdeee17ac3c01d1cc04283b9629e1586d1c67 100644 (file)
@@ -451,6 +451,7 @@ static int torture_shutdown(void *arg)
                torture_shutdown_hook();
        else
                VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
+       ftrace_dump(DUMP_ALL);
        kernel_power_off();     /* Shut down the system. */
        return 0;
 }
@@ -602,8 +603,9 @@ bool torture_init_begin(char *ttype, bool v, int *runnable)
 {
        mutex_lock(&fullstop_mutex);
        if (torture_type != NULL) {
-               pr_alert("torture_init_begin: refusing %s init: %s running",
+               pr_alert("torture_init_begin: Refusing %s init: %s running.\n",
                         ttype, torture_type);
+               pr_alert("torture_init_begin: One torture test at a time!\n");
                mutex_unlock(&fullstop_mutex);
                return false;
        }
index 05ddc0820771eb7bc456aab9e2f0e5167ce7f023..6f965864cc029ad47f63c67f6713cb79a519599e 100644 (file)
@@ -2095,8 +2095,13 @@ event_create_dir(struct dentry *parent, struct trace_event_file *file)
        trace_create_file("filter", 0644, file->dir, file,
                          &ftrace_event_filter_fops);
 
-       trace_create_file("trigger", 0644, file->dir, file,
-                         &event_trigger_fops);
+       /*
+        * Only event directories that can be enabled should have
+        * triggers.
+        */
+       if (!(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
+               trace_create_file("trigger", 0644, file->dir, file,
+                                 &event_trigger_fops);
 
        trace_create_file("format", 0444, file->dir, call,
                          &ftrace_event_format_fops);
index 3bfdff06eea728b38364652808e9f548f0e6fe37..5f5068e94003d80836040d75931269aca247e853 100644 (file)
@@ -4554,6 +4554,17 @@ static void rebind_workers(struct worker_pool *pool)
                                                  pool->attrs->cpumask) < 0);
 
        spin_lock_irq(&pool->lock);
+
+       /*
+        * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
+        * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
+        * being reworked and this can go away in time.
+        */
+       if (!(pool->flags & POOL_DISASSOCIATED)) {
+               spin_unlock_irq(&pool->lock);
+               return;
+       }
+
        pool->flags &= ~POOL_DISASSOCIATED;
 
        for_each_pool_worker(worker, pool) {
index 1e9a607534ca08a973deac3d1de62902b533b1cb..f4b797a690ba10e651be34dbcf09e81ef19e3c79 100644 (file)
@@ -1289,6 +1289,39 @@ config TORTURE_TEST
        tristate
        default n
 
+config RCU_PERF_TEST
+       tristate "performance tests for RCU"
+       depends on DEBUG_KERNEL
+       select TORTURE_TEST
+       select SRCU
+       select TASKS_RCU
+       default n
+       help
+         This option provides a kernel module that runs performance
+         tests on the RCU infrastructure.  The kernel module may be built
+         after the fact on the running kernel to be tested, if desired.
+
+         Say Y here if you want RCU performance tests to be built into
+         the kernel.
+         Say M if you want the RCU performance tests to build as a module.
+         Say N if you are unsure.
+
+config RCU_PERF_TEST_RUNNABLE
+       bool "performance tests for RCU runnable by default"
+       depends on RCU_PERF_TEST = y
+       default n
+       help
+         This option provides a way to build the RCU performance tests
+         directly into the kernel without them starting up at boot time.
+         You can use /sys/module to manually override this setting.
+         This /sys/module file is available only when the RCU performance
+         tests have been built into the kernel.
+
+         Say Y here if you want the RCU performance tests to start during
+         boot (you probably don't).
+         Say N here if you want the RCU performance tests to start only
+         after being manually enabled via /sys/module.
+
 config RCU_TORTURE_TEST
        tristate "torture tests for RCU"
        depends on DEBUG_KERNEL
index 7bd6fd436c97a0bfab9b2af388f046ce1059f36f..a65e9a8615355c5e55576141bf397de7a7bf833e 100644 (file)
@@ -23,7 +23,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o dump_stack.o timerqueue.o\
         idr.o int_sqrt.o extable.o \
         sha1.o md5.o irq_regs.o argv_split.o \
-        proportions.o flex_proportions.o ratelimit.o show_mem.o \
+        flex_proportions.o ratelimit.o show_mem.o \
         is_single_threaded.o plist.o decompress.o kobject_uevent.o \
         earlycpio.o seq_buf.o nmi_backtrace.o
 
index 2b3f46c049d458a590d080823b344da3b3229f7c..554522934c442ae15c01b1b1266e8e3e9515bfbd 100644 (file)
@@ -74,7 +74,7 @@ next_tag:
 
        /* Extract a tag from the data */
        tag = data[dp++];
-       if (tag == 0) {
+       if (tag == ASN1_EOC) {
                /* It appears to be an EOC. */
                if (data[dp++] != 0)
                        goto invalid_eoc;
@@ -96,10 +96,8 @@ next_tag:
 
        /* Extract the length */
        len = data[dp++];
-       if (len <= 0x7f) {
-               dp += len;
-               goto next_tag;
-       }
+       if (len <= 0x7f)
+               goto check_length;
 
        if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
                /* Indefinite length */
@@ -110,14 +108,18 @@ next_tag:
        }
 
        n = len - 0x80;
-       if (unlikely(n > sizeof(size_t) - 1))
+       if (unlikely(n > sizeof(len) - 1))
                goto length_too_long;
        if (unlikely(n > datalen - dp))
                goto data_overrun_error;
-       for (len = 0; n > 0; n--) {
+       len = 0;
+       for (; n > 0; n--) {
                len <<= 8;
                len |= data[dp++];
        }
+check_length:
+       if (len > datalen - dp)
+               goto data_overrun_error;
        dp += len;
        goto next_tag;
 
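A standalone sketch of the definite-length decoding and the new overrun check above, written as a plain function with invented names; indefinite lengths (0x80) are simply rejected here rather than handled as in the decoder.

#include <stddef.h>
#include <stdint.h>

/* Decode a BER/DER length starting at data[*dp], advancing *dp past the
 * length octets.  Returns the content length, or (size_t)-1 on malformed
 * or overrunning input. */
static size_t decode_ber_length(const uint8_t *data, size_t datalen, size_t *dp)
{
        size_t len, n;

        if (*dp >= datalen)
                return (size_t)-1;
        len = data[(*dp)++];
        if (len <= 0x7f)
                goto check_length;
        if (len == 0x80)                 /* indefinite form: not handled here */
                return (size_t)-1;

        n = len - 0x80;                  /* number of length octets */
        if (n > sizeof(len) - 1)         /* more octets than fit in a size_t */
                return (size_t)-1;
        if (n > datalen - *dp)           /* length octets overrun the buffer */
                return (size_t)-1;
        for (len = 0; n > 0; n--) {
                len <<= 8;
                len |= data[(*dp)++];
        }
check_length:
        if (len > datalen - *dp)         /* content would overrun the buffer */
                return (size_t)-1;
        return len;
}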
index 5fecddc32b1b47e1c2d7ce9706b9a840995f3d4e..ca5316e0087b55de6830f6a2abe24e41291015b3 100644 (file)
@@ -569,6 +569,25 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 }
 EXPORT_SYMBOL(iov_iter_alignment);
 
+unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
+{
+       unsigned long res = 0;
+       size_t size = i->count;
+       if (!size)
+               return 0;
+
+       iterate_all_kinds(i, size, v,
+               (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+                       (size != v.iov_len ? size : 0), 0),
+               (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
+                       (size != v.bv_len ? size : 0)),
+               (res |= (!res ? 0 : (unsigned long)v.iov_base) |
+                       (size != v.iov_len ? size : 0))
+               );
+       return res;
+}
+EXPORT_SYMBOL(iov_iter_gap_alignment);
+
 ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
diff --git a/lib/proportions.c b/lib/proportions.c
deleted file mode 100644 (file)
index efa54f2..0000000
+++ /dev/null
@@ -1,407 +0,0 @@
-/*
- * Floating proportions
- *
- *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
- *
- * Description:
- *
- * The floating proportion is a time derivative with an exponentially decaying
- * history:
- *
- *   p_{j} = \Sum_{i=0} (dx_{j}/dt_{-i}) / 2^(1+i)
- *
- * Where j is an element from {prop_local}, x_{j} is j's number of events,
- * and i the time period over which the differential is taken. So d/dt_{-i} is
- * the differential over the i-th last period.
- *
- * The decaying history gives smooth transitions. The time differential carries
- * the notion of speed.
- *
- * The denominator is 2^(1+i) because we want the series to be normalised, ie.
- *
- *   \Sum_{i=0} 1/2^(1+i) = 1
- *
- * Further more, if we measure time (t) in the same events as x; so that:
- *
- *   t = \Sum_{j} x_{j}
- *
- * we get that:
- *
- *   \Sum_{j} p_{j} = 1
- *
- * Writing this in an iterative fashion we get (dropping the 'd's):
- *
- *   if (++x_{j}, ++t > period)
- *     t /= 2;
- *     for_each (j)
- *       x_{j} /= 2;
- *
- * so that:
- *
- *   p_{j} = x_{j} / t;
- *
- * We optimize away the '/= 2' for the global time delta by noting that:
- *
- *   if (++t > period) t /= 2:
- *
- * Can be approximated by:
- *
- *   period/2 + (++t % period/2)
- *
- * [ Furthermore, when we choose period to be 2^n it can be written in terms of
- *   binary operations and wraparound artefacts disappear. ]
- *
- * Also note that this yields a natural counter of the elapsed periods:
- *
- *   c = t / (period/2)
- *
- * [ Its monotonic increasing property can be applied to mitigate the wrap-
- *   around issue. ]
- *
- * This allows us to do away with the loop over all prop_locals on each period
- * expiration. By remembering the period count under which it was last accessed
- * as c_{j}, we can obtain the number of 'missed' cycles from:
- *
- *   c - c_{j}
- *
- * We can then lazily catch up to the global period count every time we are
- * going to use x_{j}, by doing:
- *
- *   x_{j} /= 2^(c - c_{j}), c_{j} = c
- */
-
-#include <linux/proportions.h>
-#include <linux/rcupdate.h>
-
-int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp)
-{
-       int err;
-
-       if (shift > PROP_MAX_SHIFT)
-               shift = PROP_MAX_SHIFT;
-
-       pd->index = 0;
-       pd->pg[0].shift = shift;
-       mutex_init(&pd->mutex);
-       err = percpu_counter_init(&pd->pg[0].events, 0, gfp);
-       if (err)
-               goto out;
-
-       err = percpu_counter_init(&pd->pg[1].events, 0, gfp);
-       if (err)
-               percpu_counter_destroy(&pd->pg[0].events);
-
-out:
-       return err;
-}
-
-/*
- * We have two copies, and flip between them to make it seem like an atomic
- * update. The update is not really atomic wrt the events counter, but
- * it is internally consistent with the bit layout depending on shift.
- *
- * We copy the events count, move the bits around and flip the index.
- */
-void prop_change_shift(struct prop_descriptor *pd, int shift)
-{
-       int index;
-       int offset;
-       u64 events;
-       unsigned long flags;
-
-       if (shift > PROP_MAX_SHIFT)
-               shift = PROP_MAX_SHIFT;
-
-       mutex_lock(&pd->mutex);
-
-       index = pd->index ^ 1;
-       offset = pd->pg[pd->index].shift - shift;
-       if (!offset)
-               goto out;
-
-       pd->pg[index].shift = shift;
-
-       local_irq_save(flags);
-       events = percpu_counter_sum(&pd->pg[pd->index].events);
-       if (offset < 0)
-               events <<= -offset;
-       else
-               events >>= offset;
-       percpu_counter_set(&pd->pg[index].events, events);
-
-       /*
-        * ensure the new pg is fully written before the switch
-        */
-       smp_wmb();
-       pd->index = index;
-       local_irq_restore(flags);
-
-       synchronize_rcu();
-
-out:
-       mutex_unlock(&pd->mutex);
-}
-
-/*
- * wrap the access to the data in an rcu_read_lock() section;
- * this is used to track the active references.
- */
-static struct prop_global *prop_get_global(struct prop_descriptor *pd)
-__acquires(RCU)
-{
-       int index;
-
-       rcu_read_lock();
-       index = pd->index;
-       /*
-        * match the wmb from vcd_flip()
-        */
-       smp_rmb();
-       return &pd->pg[index];
-}
-
-static void prop_put_global(struct prop_descriptor *pd, struct prop_global *pg)
-__releases(RCU)
-{
-       rcu_read_unlock();
-}
-
-static void
-prop_adjust_shift(int *pl_shift, unsigned long *pl_period, int new_shift)
-{
-       int offset = *pl_shift - new_shift;
-
-       if (!offset)
-               return;
-
-       if (offset < 0)
-               *pl_period <<= -offset;
-       else
-               *pl_period >>= offset;
-
-       *pl_shift = new_shift;
-}
-
-/*
- * PERCPU
- */
-
-#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
-
-int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp)
-{
-       raw_spin_lock_init(&pl->lock);
-       pl->shift = 0;
-       pl->period = 0;
-       return percpu_counter_init(&pl->events, 0, gfp);
-}
-
-void prop_local_destroy_percpu(struct prop_local_percpu *pl)
-{
-       percpu_counter_destroy(&pl->events);
-}
-
-/*
- * Catch up with missed period expirations.
- *
- *   until (c_{j} == c)
- *     x_{j} -= x_{j}/2;
- *     c_{j}++;
- */
-static
-void prop_norm_percpu(struct prop_global *pg, struct prop_local_percpu *pl)
-{
-       unsigned long period = 1UL << (pg->shift - 1);
-       unsigned long period_mask = ~(period - 1);
-       unsigned long global_period;
-       unsigned long flags;
-
-       global_period = percpu_counter_read(&pg->events);
-       global_period &= period_mask;
-
-       /*
-        * Fast path - check if the local and global period count still match
-        * outside of the lock.
-        */
-       if (pl->period == global_period)
-               return;
-
-       raw_spin_lock_irqsave(&pl->lock, flags);
-       prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
-
-       /*
-        * For each missed period, we half the local counter.
-        * basically:
-        *   pl->events >> (global_period - pl->period);
-        */
-       period = (global_period - pl->period) >> (pg->shift - 1);
-       if (period < BITS_PER_LONG) {
-               s64 val = percpu_counter_read(&pl->events);
-
-               if (val < (nr_cpu_ids * PROP_BATCH))
-                       val = percpu_counter_sum(&pl->events);
-
-               __percpu_counter_add(&pl->events, -val + (val >> period),
-                                       PROP_BATCH);
-       } else
-               percpu_counter_set(&pl->events, 0);
-
-       pl->period = global_period;
-       raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/*
- *   ++x_{j}, ++t
- */
-void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
-{
-       struct prop_global *pg = prop_get_global(pd);
-
-       prop_norm_percpu(pg, pl);
-       __percpu_counter_add(&pl->events, 1, PROP_BATCH);
-       percpu_counter_add(&pg->events, 1);
-       prop_put_global(pd, pg);
-}
-
-/*
- * identical to __prop_inc_percpu, except that it limits this pl's fraction to
- * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
- */
-void __prop_inc_percpu_max(struct prop_descriptor *pd,
-                          struct prop_local_percpu *pl, long frac)
-{
-       struct prop_global *pg = prop_get_global(pd);
-
-       prop_norm_percpu(pg, pl);
-
-       if (unlikely(frac != PROP_FRAC_BASE)) {
-               unsigned long period_2 = 1UL << (pg->shift - 1);
-               unsigned long counter_mask = period_2 - 1;
-               unsigned long global_count;
-               long numerator, denominator;
-
-               numerator = percpu_counter_read_positive(&pl->events);
-               global_count = percpu_counter_read(&pg->events);
-               denominator = period_2 + (global_count & counter_mask);
-
-               if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
-                       goto out_put;
-       }
-
-       percpu_counter_add(&pl->events, 1);
-       percpu_counter_add(&pg->events, 1);
-
-out_put:
-       prop_put_global(pd, pg);
-}
-
-/*
- * Obtain a fraction of this proportion
- *
- *   p_{j} = x_{j} / (period/2 + t % period/2)
- */
-void prop_fraction_percpu(struct prop_descriptor *pd,
-               struct prop_local_percpu *pl,
-               long *numerator, long *denominator)
-{
-       struct prop_global *pg = prop_get_global(pd);
-       unsigned long period_2 = 1UL << (pg->shift - 1);
-       unsigned long counter_mask = period_2 - 1;
-       unsigned long global_count;
-
-       prop_norm_percpu(pg, pl);
-       *numerator = percpu_counter_read_positive(&pl->events);
-
-       global_count = percpu_counter_read(&pg->events);
-       *denominator = period_2 + (global_count & counter_mask);
-
-       prop_put_global(pd, pg);
-}
-
-/*
- * SINGLE
- */
-
-int prop_local_init_single(struct prop_local_single *pl)
-{
-       raw_spin_lock_init(&pl->lock);
-       pl->shift = 0;
-       pl->period = 0;
-       pl->events = 0;
-       return 0;
-}
-
-void prop_local_destroy_single(struct prop_local_single *pl)
-{
-}
-
-/*
- * Catch up with missed period expirations.
- */
-static
-void prop_norm_single(struct prop_global *pg, struct prop_local_single *pl)
-{
-       unsigned long period = 1UL << (pg->shift - 1);
-       unsigned long period_mask = ~(period - 1);
-       unsigned long global_period;
-       unsigned long flags;
-
-       global_period = percpu_counter_read(&pg->events);
-       global_period &= period_mask;
-
-       /*
-        * Fast path - check if the local and global period count still match
-        * outside of the lock.
-        */
-       if (pl->period == global_period)
-               return;
-
-       raw_spin_lock_irqsave(&pl->lock, flags);
-       prop_adjust_shift(&pl->shift, &pl->period, pg->shift);
-       /*
-        * For each missed period, we half the local counter.
-        */
-       period = (global_period - pl->period) >> (pg->shift - 1);
-       if (likely(period < BITS_PER_LONG))
-               pl->events >>= period;
-       else
-               pl->events = 0;
-       pl->period = global_period;
-       raw_spin_unlock_irqrestore(&pl->lock, flags);
-}
-
-/*
- *   ++x_{j}, ++t
- */
-void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
-{
-       struct prop_global *pg = prop_get_global(pd);
-
-       prop_norm_single(pg, pl);
-       pl->events++;
-       percpu_counter_add(&pg->events, 1);
-       prop_put_global(pd, pg);
-}
-
-/*
- * Obtain a fraction of this proportion
- *
- *   p_{j} = x_{j} / (period/2 + t % period/2)
- */
-void prop_fraction_single(struct prop_descriptor *pd,
-               struct prop_local_single *pl,
-               long *numerator, long *denominator)
-{
-       struct prop_global *pg = prop_get_global(pd);
-       unsigned long period_2 = 1UL << (pg->shift - 1);
-       unsigned long counter_mask = period_2 - 1;
-       unsigned long global_count;
-
-       prop_norm_single(pg, pl);
-       *numerator = pl->events;
-
-       global_count = percpu_counter_read(&pg->events);
-       *denominator = period_2 + (global_count & counter_mask);
-
-       prop_put_global(pd, pg);
-}
index 9e0b0315a724caf24ea195e275ed431639a262b4..53ad6c0831aebe6d3c9ec7cfc1387fa34ca7ea72 100644 (file)
 
 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
 
+#define STACK_ALLOC_NULL_PROTECTION_BITS 1
 #define STACK_ALLOC_ORDER 2 /* 'Slab' size order for stack depot, 4 pages */
 #define STACK_ALLOC_SIZE (1LL << (PAGE_SHIFT + STACK_ALLOC_ORDER))
 #define STACK_ALLOC_ALIGN 4
 #define STACK_ALLOC_OFFSET_BITS (STACK_ALLOC_ORDER + PAGE_SHIFT - \
                                        STACK_ALLOC_ALIGN)
-#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - STACK_ALLOC_OFFSET_BITS)
+#define STACK_ALLOC_INDEX_BITS (DEPOT_STACK_BITS - \
+               STACK_ALLOC_NULL_PROTECTION_BITS - STACK_ALLOC_OFFSET_BITS)
 #define STACK_ALLOC_SLABS_CAP 1024
 #define STACK_ALLOC_MAX_SLABS \
        (((1LL << (STACK_ALLOC_INDEX_BITS)) < STACK_ALLOC_SLABS_CAP) ? \
@@ -59,6 +61,7 @@ union handle_parts {
        struct {
                u32 slabindex : STACK_ALLOC_INDEX_BITS;
                u32 offset : STACK_ALLOC_OFFSET_BITS;
+               u32 valid : STACK_ALLOC_NULL_PROTECTION_BITS;
        };
 };
 
@@ -136,6 +139,7 @@ static struct stack_record *depot_alloc_stack(unsigned long *entries, int size,
        stack->size = size;
        stack->handle.slabindex = depot_index;
        stack->handle.offset = depot_offset >> STACK_ALLOC_ALIGN;
+       stack->handle.valid = 1;
        memcpy(stack->entries, entries, size * sizeof(unsigned long));
        depot_offset += required_size;
 
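A compile-and-run illustration of why the extra bit matters: with the valid bit set, even the handle for slab 0, offset 0 differs from the all-zero "no stack recorded" value. The field widths below assume a 12-bit PAGE_SHIFT and are illustrative only, and the bitfield layout itself is implementation-defined.

#include <stdint.h>
#include <stdio.h>

union demo_handle {
        uint32_t handle;
        struct {
                uint32_t slabindex : 21;  /* 32 - 10 - 1 */
                uint32_t offset    : 10;  /* order 2 + PAGE_SHIFT 12 - align 4 */
                uint32_t valid     : 1;   /* keeps real handles non-zero */
        } parts;
};

int main(void)
{
        union demo_handle h = { .handle = 0 };

        h.parts.slabindex = 0;            /* first slab ...              */
        h.parts.offset    = 0;            /* ... first slot ...          */
        h.parts.valid     = 1;            /* ... yet the handle is not 0 */

        printf("packed handle: %#x\n", h.handle);
        return 0;
}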
index ccf97b02b85f32d38f6f68a41bedf89b9a1ee596..8fa2540438015c1859724c606072a2770939d954 100644 (file)
@@ -852,16 +852,8 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
                                                        ISOLATE_UNEVICTABLE);
 
-               /*
-                * In case of fatal failure, release everything that might
-                * have been isolated in the previous iteration, and signal
-                * the failure back to caller.
-                */
-               if (!pfn) {
-                       putback_movable_pages(&cc->migratepages);
-                       cc->nr_migratepages = 0;
+               if (!pfn)
                        break;
-               }
 
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
@@ -1741,7 +1733,7 @@ void compaction_unregister_node(struct node *node)
 
 static inline bool kcompactd_work_requested(pg_data_t *pgdat)
 {
-       return pgdat->kcompactd_max_order > 0;
+       return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
 }
 
 static bool kcompactd_node_suitable(pg_data_t *pgdat)
@@ -1805,6 +1797,8 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                INIT_LIST_HEAD(&cc.freepages);
                INIT_LIST_HEAD(&cc.migratepages);
 
+               if (kthread_should_stop())
+                       return;
                status = compact_zone(zone, &cc);
 
                if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
index df67b53ae3c53423b8f3065093a6e65f90dbafb7..b49ee126d4d1feb7a89a9c776edc89141c3351da 100644 (file)
@@ -1298,15 +1298,9 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
        /*
         * We can only reuse the page if nobody else maps the huge page or it's
-        * part. We can do it by checking page_mapcount() on each sub-page, but
-        * it's expensive.
-        * The cheaper way is to check page_count() to be equal 1: every
-        * mapcount takes page reference reference, so this way we can
-        * guarantee, that the PMD is the only mapping.
-        * This can give false negative if somebody pinned the page, but that's
-        * fine.
+        * part.
         */
-       if (page_mapcount(page) == 1 && page_count(page) == 1) {
+       if (page_trans_huge_mapcount(page, NULL) == 1) {
                pmd_t entry;
                entry = pmd_mkyoung(orig_pmd);
                entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
@@ -2079,7 +2073,8 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
                if (pte_write(pteval)) {
                        writable = true;
                } else {
-                       if (PageSwapCache(page) && !reuse_swap_page(page)) {
+                       if (PageSwapCache(page) &&
+                           !reuse_swap_page(page, NULL)) {
                                unlock_page(page);
                                result = SCAN_SWAP_CACHE_PAGE;
                                goto out;
@@ -3222,6 +3217,64 @@ int total_mapcount(struct page *page)
        return ret;
 }
 
+/*
+ * This calculates accurately how many mappings a transparent hugepage
+ * has (unlike page_mapcount() which isn't fully accurate). This full
+ * accuracy is primarily needed to know if copy-on-write faults can
+ * reuse the page and change the mapping to read-write instead of
+ * copying them. At the same time this returns the total_mapcount too.
+ *
+ * The function returns the highest mapcount any one of the subpages
+ * has. If the return value is one, even if different processes are
+ * mapping different subpages of the transparent hugepage, they can
+ * all reuse it, because each process is reusing a different subpage.
+ *
+ * The total_mapcount is instead counting all virtual mappings of the
+ * subpages. If the total_mapcount is equal to "one", it tells the
+ * caller all mappings belong to the same "mm" and in turn the
+ * anon_vma of the transparent hugepage can become the vma->anon_vma
+ * local one as no other process may be mapping any of the subpages.
+ *
+ * It would be more accurate to replace page_mapcount() with
+ * page_trans_huge_mapcount(); however, we only use
+ * page_trans_huge_mapcount() in the copy-on-write faults where we
+ * need full accuracy to avoid breaking page pinning, because
+ * page_trans_huge_mapcount() is slower than page_mapcount().
+ */
+int page_trans_huge_mapcount(struct page *page, int *total_mapcount)
+{
+       int i, ret, _total_mapcount, mapcount;
+
+       /* hugetlbfs shouldn't call it */
+       VM_BUG_ON_PAGE(PageHuge(page), page);
+
+       if (likely(!PageTransCompound(page))) {
+               mapcount = atomic_read(&page->_mapcount) + 1;
+               if (total_mapcount)
+                       *total_mapcount = mapcount;
+               return mapcount;
+       }
+
+       page = compound_head(page);
+
+       _total_mapcount = ret = 0;
+       for (i = 0; i < HPAGE_PMD_NR; i++) {
+               mapcount = atomic_read(&page[i]._mapcount) + 1;
+               ret = max(ret, mapcount);
+               _total_mapcount += mapcount;
+       }
+       if (PageDoubleMap(page)) {
+               ret -= 1;
+               _total_mapcount -= HPAGE_PMD_NR;
+       }
+       mapcount = compound_mapcount(page);
+       ret += mapcount;
+       _total_mapcount += mapcount;
+       if (total_mapcount)
+               *total_mapcount = _total_mapcount;
+       return ret;
+}
+
 /*
  * This function splits huge page into normal pages. @page can point to any
  * subpage of huge page to split. Split doesn't change the position of @page.
@@ -3452,7 +3505,7 @@ next:
                }
        }
 
-       pr_info("%lu of %lu THP split", split, total);
+       pr_info("%lu of %lu THP split\n", split, total);
 
        return 0;
 }
@@ -3463,7 +3516,7 @@ static int __init split_huge_pages_debugfs(void)
 {
        void *ret;
 
-       ret = debugfs_create_file("split_huge_pages", 0644, NULL, NULL,
+       ret = debugfs_create_file("split_huge_pages", 0200, NULL, NULL,
                        &split_huge_pages_fops);
        if (!ret)
                pr_warn("Failed to create split_huge_pages in debugfs");
index b99e828172f6ef30e279e4d07b7a74ba9cbb36db..4786b4150f62108438f5aabaf77d339280eb51dc 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -783,6 +783,7 @@ static int unmerge_and_remove_all_rmap_items(void)
                }
 
                remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);
+               up_read(&mm->mmap_sem);
 
                spin_lock(&ksm_mmlist_lock);
                ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
@@ -794,12 +795,9 @@ static int unmerge_and_remove_all_rmap_items(void)
 
                        free_mm_slot(mm_slot);
                        clear_bit(MMF_VM_MERGEABLE, &mm->flags);
-                       up_read(&mm->mmap_sem);
                        mmdrop(mm);
-               } else {
+               } else
                        spin_unlock(&ksm_mmlist_lock);
-                       up_read(&mm->mmap_sem);
-               }
        }
 
        /* Clean up stable nodes, but don't worry if some are still busy */
@@ -1663,8 +1661,15 @@ next_mm:
                up_read(&mm->mmap_sem);
                mmdrop(mm);
        } else {
-               spin_unlock(&ksm_mmlist_lock);
                up_read(&mm->mmap_sem);
+               /*
+                * up_read(&mm->mmap_sem) first because after
+                * spin_unlock(&ksm_mmlist_lock) runs, the "mm" may
+                * already have been freed under us by __ksm_exit()
+                * because the "mm_slot" is still hashed and
+                * ksm_scan.mm_slot doesn't point to it anymore.
+                */
+               spin_unlock(&ksm_mmlist_lock);
        }
 
        /* Repeat until we've completed scanning the whole list */
index 305537fc86406076ba07b3cdfeb6bf7affbc8ff6..07493e34ab7e281936d43cb84e681728a23cbc4a 100644 (file)
@@ -1222,15 +1222,8 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                next = pmd_addr_end(addr, end);
                if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
                        if (next - addr != HPAGE_PMD_SIZE) {
-#ifdef CONFIG_DEBUG_VM
-                               if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
-                                       pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
-                                               __func__, addr, end,
-                                               vma->vm_start,
-                                               vma->vm_end);
-                                       BUG();
-                               }
-#endif
+                               VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
+                                   !rwsem_is_locked(&tlb->mm->mmap_sem), vma);
                                split_huge_pmd(vma, pmd, addr);
                        } else if (zap_huge_pmd(tlb, vma, pmd, addr))
                                goto next;
@@ -2380,6 +2373,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * not dirty accountable.
         */
        if (PageAnon(old_page) && !PageKsm(old_page)) {
+               int total_mapcount;
                if (!trylock_page(old_page)) {
                        get_page(old_page);
                        pte_unmap_unlock(page_table, ptl);
@@ -2394,13 +2388,18 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        }
                        put_page(old_page);
                }
-               if (reuse_swap_page(old_page)) {
-                       /*
-                        * The page is all ours.  Move it to our anon_vma so
-                        * the rmap code will not search our parent or siblings.
-                        * Protected against the rmap code by the page lock.
-                        */
-                       page_move_anon_rmap(old_page, vma, address);
+               if (reuse_swap_page(old_page, &total_mapcount)) {
+                       if (total_mapcount == 1) {
+                               /*
+                                * The page is all ours. Move it to
+                                * our anon_vma so the rmap code will
+                                * not search our parent or siblings.
+                                * Protected against the rmap code by
+                                * the page lock.
+                                */
+                               page_move_anon_rmap(compound_head(old_page),
+                                                   vma, address);
+                       }
                        unlock_page(old_page);
                        return wp_page_reuse(mm, vma, address, page_table, ptl,
                                             orig_pte, old_page, 0, 0);
@@ -2624,7 +2623,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        inc_mm_counter_fast(mm, MM_ANONPAGES);
        dec_mm_counter_fast(mm, MM_SWAPENTS);
        pte = mk_pte(page, vma->vm_page_prot);
-       if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
+       if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                flags &= ~FAULT_FLAG_WRITE;
                ret |= VM_FAULT_WRITE;
index 999792d35ccc0faee6c4f85d70b6e31876855284..bc5149d5ec38016da91a8b1c85aeca0193143f0c 100644 (file)
@@ -1910,7 +1910,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
        if (gdtc->dirty > gdtc->bg_thresh)
                return true;
 
-       if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(gdtc))
+       if (wb_stat(wb, WB_RECLAIMABLE) >
+           wb_calc_thresh(gdtc->wb, gdtc->bg_thresh))
                return true;
 
        if (mdtc) {
@@ -1924,7 +1925,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
                if (mdtc->dirty > mdtc->bg_thresh)
                        return true;
 
-               if (wb_stat(wb, WB_RECLAIMABLE) > __wb_calc_thresh(mdtc))
+               if (wb_stat(wb, WB_RECLAIMABLE) >
+                   wb_calc_thresh(mdtc->wb, mdtc->bg_thresh))
                        return true;
        }
 
index 59de90d5d3a362c7293e624e748cd7b6ebab73b3..c1069efcc4d7477a5fc517303b67747f89b77074 100644 (file)
@@ -6485,7 +6485,7 @@ int __meminit init_per_zone_wmark_min(void)
        setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_wmark_min)
+core_initcall(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
index 83874eced5bfa0ac4c889cb0d65ecf22cfa24af0..031713ab40ce9fdeb289e149605e6d1514f345c0 100644 (file)
@@ -922,18 +922,19 @@ out:
  * to it.  And as a side-effect, free up its swap: because the old content
  * on disk will never be read, and seeking back there to write new content
  * later would only waste time away from clustering.
+ *
+ * NOTE: total_mapcount should not be relied upon by the caller if
+ * reuse_swap_page() returns false, but it may always be overwritten
+ * (see the other implementation for CONFIG_SWAP=n).
  */
-int reuse_swap_page(struct page *page)
+bool reuse_swap_page(struct page *page, int *total_mapcount)
 {
        int count;
 
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        if (unlikely(PageKsm(page)))
-               return 0;
-       /* The page is part of THP and cannot be reused */
-       if (PageTransCompound(page))
-               return 0;
-       count = page_mapcount(page);
+               return false;
+       count = page_trans_huge_mapcount(page, total_mapcount);
        if (count <= 1 && PageSwapCache(page)) {
                count += page_swapcount(page);
                if (count == 1 && !PageWriteback(page)) {
index e72efb109fde5e5dc23007fa302a21cba7d103c5..fe47fbba995abd4e9911f58f9b9741ed7cc8b08a 100644 (file)
@@ -1735,10 +1735,13 @@ static struct page *isolate_source_page(struct size_class *class)
 static unsigned long zs_can_compact(struct size_class *class)
 {
        unsigned long obj_wasted;
+       unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
+       unsigned long obj_used = zs_stat_get(class, OBJ_USED);
 
-       obj_wasted = zs_stat_get(class, OBJ_ALLOCATED) -
-               zs_stat_get(class, OBJ_USED);
+       if (obj_allocated <= obj_used)
+               return 0;
 
+       obj_wasted = obj_allocated - obj_used;
        obj_wasted /= get_maxobj_per_zspage(class->size,
                        class->pages_per_zspage);
 
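A minimal sketch of the guard added above, with invented names. The two counters are read separately and can race, so "used" may momentarily exceed "allocated"; without the early return, the unsigned subtraction would wrap to a huge value and the caller would think there is a huge amount to compact.

#include <stdio.h>

static unsigned long wasted_objects(unsigned long allocated, unsigned long used)
{
        if (allocated <= used)
                return 0;                /* nothing wasted; avoid wraparound */
        return allocated - used;
}

int main(void)
{
        /* Without the guard, 10 - 12 would wrap to ULONG_MAX - 1. */
        printf("%lu\n", wasted_objects(10, 12));
        return 0;
}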
index 91dad80d068b75de629105ec0e688ce16335e16f..de0f119b1780b2af14d6bf6868f0a8da0f1ee236 100644 (file)
@@ -170,6 +170,8 @@ static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
 static LIST_HEAD(zswap_pools);
 /* protects zswap_pools list modification */
 static DEFINE_SPINLOCK(zswap_pools_lock);
+/* pool counter to provide unique names to zpool */
+static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
 /* used by param callback function */
 static bool zswap_init_started;
@@ -565,6 +567,7 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
        struct zswap_pool *pool;
+       char name[38]; /* 'zswap' + 32 char (max) num + \0 */
        gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
 
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
@@ -573,7 +576,10 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
                return NULL;
        }
 
-       pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops);
+       /* unique name for each pool specifically required by zsmalloc */
+       snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
+
+       pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
        if (!pool->zpool) {
                pr_err("%s zpool not available\n", type);
                goto error;
index 3315b9a598af0f71a95a080d450cf1faf3a70a1d..4026f198a7345c91a34ef1c8aed6f750e7b27d3e 100644 (file)
 
 #include "bat_v_elp.h"
 #include "bat_v_ogm.h"
+#include "hard-interface.h"
 #include "hash.h"
 #include "originator.h"
 #include "packet.h"
 
+static void batadv_v_iface_activate(struct batadv_hard_iface *hard_iface)
+{
+       /* B.A.T.M.A.N. V does not use any queuing mechanism; therefore it can
+        * set the interface as ACTIVE right away, without any risk of a race
+        * condition.
+        */
+       if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
+               hard_iface->if_status = BATADV_IF_ACTIVE;
+}
+
 static int batadv_v_iface_enable(struct batadv_hard_iface *hard_iface)
 {
        int ret;
@@ -274,6 +285,7 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
 
 static struct batadv_algo_ops batadv_batman_v __read_mostly = {
        .name = "BATMAN_V",
+       .bat_iface_activate = batadv_v_iface_activate,
        .bat_iface_enable = batadv_v_iface_enable,
        .bat_iface_disable = batadv_v_iface_disable,
        .bat_iface_update_mac = batadv_v_iface_update_mac,
index e96d7c745b4a1de6444284f38207ed599925ac2e..3e6b2624f9809365ac29da7f88a3895af3df8728 100644 (file)
@@ -568,6 +568,7 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * be sent to
  * @bat_priv: the bat priv with all the soft interface information
  * @ip_dst: ipv4 to look up in the DHT
+ * @vid: VLAN identifier
  *
  * An originator O is selected if and only if its DHT_ID value is one of three
  * closest values (from the LEFT, with wrap around if needed) then the hash
@@ -576,7 +577,8 @@ static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
  * Return: the candidate array of size BATADV_DAT_CANDIDATE_NUM.
  */
 static struct batadv_dat_candidate *
-batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
+batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst,
+                            unsigned short vid)
 {
        int select;
        batadv_dat_addr_t last_max = BATADV_DAT_ADDR_MAX, ip_key;
@@ -592,7 +594,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
                return NULL;
 
        dat.ip = ip_dst;
-       dat.vid = 0;
+       dat.vid = vid;
        ip_key = (batadv_dat_addr_t)batadv_hash_dat(&dat,
                                                    BATADV_DAT_ADDR_MAX);
 
@@ -612,6 +614,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  * @bat_priv: the bat priv with all the soft interface information
  * @skb: payload to send
  * @ip: the DHT key
+ * @vid: VLAN identifier
  * @packet_subtype: unicast4addr packet subtype to use
  *
  * This function copies the skb with pskb_copy() and is sent as unicast packet
@@ -622,7 +625,7 @@ batadv_dat_select_candidates(struct batadv_priv *bat_priv, __be32 ip_dst)
  */
 static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
                                 struct sk_buff *skb, __be32 ip,
-                                int packet_subtype)
+                                unsigned short vid, int packet_subtype)
 {
        int i;
        bool ret = false;
@@ -631,7 +634,7 @@ static bool batadv_dat_send_data(struct batadv_priv *bat_priv,
        struct sk_buff *tmp_skb;
        struct batadv_dat_candidate *cand;
 
-       cand = batadv_dat_select_candidates(bat_priv, ip);
+       cand = batadv_dat_select_candidates(bat_priv, ip, vid);
        if (!cand)
                goto out;
 
@@ -1022,7 +1025,7 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
                ret = true;
        } else {
                /* Send the request to the DHT */
-               ret = batadv_dat_send_data(bat_priv, skb, ip_dst,
+               ret = batadv_dat_send_data(bat_priv, skb, ip_dst, vid,
                                           BATADV_P_DAT_DHT_GET);
        }
 out:
@@ -1150,8 +1153,8 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
        /* Send the ARP reply to the candidates for both the IP addresses that
         * the node obtained from the ARP reply
         */
-       batadv_dat_send_data(bat_priv, skb, ip_src, BATADV_P_DAT_DHT_PUT);
-       batadv_dat_send_data(bat_priv, skb, ip_dst, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_src, vid, BATADV_P_DAT_DHT_PUT);
+       batadv_dat_send_data(bat_priv, skb, ip_dst, vid, BATADV_P_DAT_DHT_PUT);
 }
 
 /**
index b22b2775a0a5ff706c17ea06c39c9bb092da1f9a..0a7deaf2670a981078d0c5c3de1805f69c265796 100644 (file)
@@ -407,6 +407,9 @@ batadv_hardif_activate_interface(struct batadv_hard_iface *hard_iface)
 
        batadv_update_min_mtu(hard_iface->soft_iface);
 
+       if (bat_priv->bat_algo_ops->bat_iface_activate)
+               bat_priv->bat_algo_ops->bat_iface_activate(hard_iface);
+
 out:
        if (primary_if)
                batadv_hardif_put(primary_if);
@@ -572,8 +575,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hard_iface *primary_if = NULL;
 
-       if (hard_iface->if_status == BATADV_IF_ACTIVE)
-               batadv_hardif_deactivate_interface(hard_iface);
+       batadv_hardif_deactivate_interface(hard_iface);
 
        if (hard_iface->if_status != BATADV_IF_INACTIVE)
                goto out;
index e4cbb0753e37ff6681a20e8912df1f4ad1e74a05..c355a824713cd9224e2c705e222c2061cb504b5a 100644 (file)
@@ -250,7 +250,6 @@ static void batadv_neigh_node_release(struct kref *ref)
 {
        struct hlist_node *node_tmp;
        struct batadv_neigh_node *neigh_node;
-       struct batadv_hardif_neigh_node *hardif_neigh;
        struct batadv_neigh_ifinfo *neigh_ifinfo;
        struct batadv_algo_ops *bao;
 
@@ -262,13 +261,7 @@ static void batadv_neigh_node_release(struct kref *ref)
                batadv_neigh_ifinfo_put(neigh_ifinfo);
        }
 
-       hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
-                                              neigh_node->addr);
-       if (hardif_neigh) {
-               /* batadv_hardif_neigh_get() increases refcount too */
-               batadv_hardif_neigh_put(hardif_neigh);
-               batadv_hardif_neigh_put(hardif_neigh);
-       }
+       batadv_hardif_neigh_put(neigh_node->hardif_neigh);
 
        if (bao->bat_neigh_free)
                bao->bat_neigh_free(neigh_node);
@@ -663,6 +656,11 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        ether_addr_copy(neigh_node->addr, neigh_addr);
        neigh_node->if_incoming = hard_iface;
        neigh_node->orig_node = orig_node;
+       neigh_node->last_seen = jiffies;
+
+       /* increment unique neighbor refcount */
+       kref_get(&hardif_neigh->refcount);
+       neigh_node->hardif_neigh = hardif_neigh;
 
        /* extra reference for return */
        kref_init(&neigh_node->refcount);
@@ -672,9 +670,6 @@ batadv_neigh_node_new(struct batadv_orig_node *orig_node,
        hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
        spin_unlock_bh(&orig_node->neigh_list_lock);
 
-       /* increment unique neighbor refcount */
-       kref_get(&hardif_neigh->refcount);
-
        batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
                   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
                   neigh_addr, orig_node->orig, hard_iface->net_dev->name);
index 4dd646a52f1a16ca297a3cdb1abeff56206df166..b781bf75325061a5157c54bc976d1df1a522e0bd 100644 (file)
@@ -105,6 +105,15 @@ static void _batadv_update_route(struct batadv_priv *bat_priv,
                neigh_node = NULL;
 
        spin_lock_bh(&orig_node->neigh_list_lock);
+       /* curr_router used earlier may not be the current orig_ifinfo->router
+        * anymore because it was dereferenced outside of the neigh_list_lock
+        * protected region. After the new best neighbor has replaced the
+        * current best neighbor, the reference counter needs to be decreased.
+        * Consequently, the code needs to ensure that the curr_router variable
+        * contains a pointer to the replaced best neighbor.
+        */
+       curr_router = rcu_dereference_protected(orig_ifinfo->router, true);
+
        rcu_assign_pointer(orig_ifinfo->router, neigh_node);
        spin_unlock_bh(&orig_node->neigh_list_lock);
        batadv_orig_ifinfo_put(orig_ifinfo);
index 3ce06e0a91b1c125cdf5ff594db57529a2deaacf..76417850d3fcbb6ddf5d6f15c822cebdda4912da 100644 (file)
@@ -675,6 +675,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
+                       if (!forw_packet->own)
+                               atomic_inc(&bat_priv->bcast_queue_left);
+
                        batadv_forw_packet_free(forw_packet);
                }
        }
@@ -702,6 +705,9 @@ batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
 
                if (pending) {
                        hlist_del(&forw_packet->list);
+                       if (!forw_packet->own)
+                               atomic_inc(&bat_priv->batman_queue_left);
+
                        batadv_forw_packet_free(forw_packet);
                }
        }
index 0710379491bffc0627c35f0ed5ba6bbf535bb1bb..8a136b6a1ff06165b7e7c257fd8b00847fbf3065 100644 (file)
@@ -408,11 +408,17 @@ void batadv_interface_rx(struct net_device *soft_iface,
         */
        nf_reset(skb);
 
+       if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
+               goto dropped;
+
        vid = batadv_get_vid(skb, 0);
        ethhdr = eth_hdr(skb);
 
        switch (ntohs(ethhdr->h_proto)) {
        case ETH_P_8021Q:
+               if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
+                       goto dropped;
+
                vhdr = (struct vlan_ethhdr *)skb->data;
 
                if (vhdr->h_vlan_encapsulated_proto != ethertype)
@@ -424,8 +430,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
        }
 
        /* skb->dev & skb->pkt_type are set here */
-       if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
-               goto dropped;
        skb->protocol = eth_type_trans(skb, soft_iface);
 
        /* should not be necessary anymore as we use skb_pull_rcsum()
index 0b43e86328a59bb42f7200380b840b0209fd26a1..9b4551a86535c6fb0264948cfa5aad63b8ee6dbb 100644 (file)
@@ -215,6 +215,8 @@ static void batadv_tt_local_entry_release(struct kref *ref)
        tt_local_entry = container_of(ref, struct batadv_tt_local_entry,
                                      common.refcount);
 
+       batadv_softif_vlan_put(tt_local_entry->vlan);
+
        kfree_rcu(tt_local_entry, common.rcu);
 }
 
@@ -673,6 +675,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
        kref_get(&tt_local->common.refcount);
        tt_local->last_seen = jiffies;
        tt_local->common.added_at = tt_local->last_seen;
+       tt_local->vlan = vlan;
 
        /* the batman interface mac and multicast addresses should never be
         * purged
@@ -991,7 +994,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
        struct batadv_hard_iface *primary_if;
-       struct batadv_softif_vlan *vlan;
        struct hlist_head *head;
        unsigned short vid;
        u32 i;
@@ -1027,14 +1029,6 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                        last_seen_msecs = last_seen_msecs % 1000;
 
                        no_purge = tt_common_entry->flags & np_flag;
-
-                       vlan = batadv_softif_vlan_get(bat_priv, vid);
-                       if (!vlan) {
-                               seq_printf(seq, "Cannot retrieve VLAN %d\n",
-                                          BATADV_PRINT_VID(vid));
-                               continue;
-                       }
-
                        seq_printf(seq,
                                   " * %pM %4i [%c%c%c%c%c%c] %3u.%03u   (%#.8x)\n",
                                   tt_common_entry->addr,
@@ -1052,9 +1046,7 @@ int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
                                     BATADV_TT_CLIENT_ISOLA) ? 'I' : '.'),
                                   no_purge ? 0 : last_seen_secs,
                                   no_purge ? 0 : last_seen_msecs,
-                                  vlan->tt.crc);
-
-                       batadv_softif_vlan_put(vlan);
+                                  tt_local->vlan->tt.crc);
                }
                rcu_read_unlock();
        }
@@ -1099,7 +1091,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        u16 flags, curr_flags = BATADV_NO_FLAGS;
-       struct batadv_softif_vlan *vlan;
        void *tt_entry_exists;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
@@ -1139,14 +1130,6 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
        /* extra call to free the local tt entry */
        batadv_tt_local_entry_put(tt_local_entry);
 
-       /* decrease the reference held for this vlan */
-       vlan = batadv_softif_vlan_get(bat_priv, vid);
-       if (!vlan)
-               goto out;
-
-       batadv_softif_vlan_put(vlan);
-       batadv_softif_vlan_put(vlan);
-
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_put(tt_local_entry);
@@ -1219,7 +1202,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        u32 i;
@@ -1241,14 +1223,6 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv,
-                                                     tt_common_entry->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -3309,7 +3283,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
-       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3339,13 +3312,6 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                                                struct batadv_tt_local_entry,
                                                common);
 
-                       /* decrease the reference held for this vlan */
-                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
-                       if (vlan) {
-                               batadv_softif_vlan_put(vlan);
-                               batadv_softif_vlan_put(vlan);
-                       }
-
                        batadv_tt_local_entry_put(tt_local);
                }
                spin_unlock_bh(list_lock);
index 9abfb3e73c3448a9612c9c282166fe5e18b44327..1e47fbe8bb7b2e9b3ec79c64ef508d7305b5d1a7 100644 (file)
@@ -433,6 +433,7 @@ struct batadv_hardif_neigh_node {
  * @ifinfo_lock: lock protecting private ifinfo members and list
  * @if_incoming: pointer to incoming hard-interface
  * @last_seen: when last packet via this neighbor was received
+ * @hardif_neigh: hardif_neigh of this neighbor
  * @refcount: number of contexts the object is used
  * @rcu: struct used for freeing in an RCU-safe manner
  */
@@ -444,6 +445,7 @@ struct batadv_neigh_node {
        spinlock_t ifinfo_lock; /* protects ifinfo_list and its members */
        struct batadv_hard_iface *if_incoming;
        unsigned long last_seen;
+       struct batadv_hardif_neigh_node *hardif_neigh;
        struct kref refcount;
        struct rcu_head rcu;
 };
@@ -1073,10 +1075,12 @@ struct batadv_tt_common_entry {
  * struct batadv_tt_local_entry - translation table local entry data
  * @common: general translation table data
  * @last_seen: timestamp used for purging stale tt local entries
+ * @vlan: soft-interface vlan of the entry
  */
 struct batadv_tt_local_entry {
        struct batadv_tt_common_entry common;
        unsigned long last_seen;
+       struct batadv_softif_vlan *vlan;
 };
 
 /**
@@ -1250,6 +1254,8 @@ struct batadv_forw_packet {
  * struct batadv_algo_ops - mesh algorithm callbacks
  * @list: list node for the batadv_algo_list
  * @name: name of the algorithm
+ * @bat_iface_activate: start routing mechanisms when hard-interface is brought
+ *  up
  * @bat_iface_enable: init routing info when hard-interface is enabled
  * @bat_iface_disable: de-init routing info when hard-interface is disabled
  * @bat_iface_update_mac: (re-)init mac addresses of the protocol information
@@ -1277,6 +1283,7 @@ struct batadv_forw_packet {
 struct batadv_algo_ops {
        struct hlist_node list;
        char *name;
+       void (*bat_iface_activate)(struct batadv_hard_iface *hard_iface);
        int (*bat_iface_enable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_disable)(struct batadv_hard_iface *hard_iface);
        void (*bat_iface_update_mac)(struct batadv_hard_iface *hard_iface);
index 263b4de4de57ccf4beddbfc48e62967236450b9b..60a3dbfca8a1f45179dd691dfd8bed7eceee9e0e 100644 (file)
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* called with RTNL */
 static int get_bridge_ifindices(struct net *net, int *indices, int num)
 {
        struct net_device *dev;
        int i = 0;
 
-       for_each_netdev(net, dev) {
+       rcu_read_lock();
+       for_each_netdev_rcu(net, dev) {
                if (i >= num)
                        break;
                if (dev->priv_flags & IFF_EBRIDGE)
                        indices[i++] = dev->ifindex;
        }
+       rcu_read_unlock();
 
        return i;
 }
index 191ea66e4d929c8b1237f0d13e783e8ea415ed93..6852f3c7009c2b1cc2cbfa9e08b6b15104b4d12b 100644 (file)
@@ -1279,6 +1279,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
        struct br_ip saddr;
        unsigned long max_delay;
        unsigned long now = jiffies;
+       unsigned int offset = skb_transport_offset(skb);
        __be32 group;
        int err = 0;
 
@@ -1289,14 +1290,14 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 
        group = ih->group;
 
-       if (skb->len == sizeof(*ih)) {
+       if (skb->len == offset + sizeof(*ih)) {
                max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
 
                if (!max_delay) {
                        max_delay = 10 * HZ;
                        group = 0;
                }
-       } else if (skb->len >= sizeof(*ih3)) {
+       } else if (skb->len >= offset + sizeof(*ih3)) {
                ih3 = igmpv3_query_hdr(skb);
                if (ih3->nsrcs)
                        goto out;
@@ -1357,6 +1358,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
        struct br_ip saddr;
        unsigned long max_delay;
        unsigned long now = jiffies;
+       unsigned int offset = skb_transport_offset(skb);
        const struct in6_addr *group = NULL;
        bool is_general_query;
        int err = 0;
@@ -1366,8 +1368,8 @@ static int br_ip6_multicast_query(struct net_bridge *br,
            (port && port->state == BR_STATE_DISABLED))
                goto out;
 
-       if (skb->len == sizeof(*mld)) {
-               if (!pskb_may_pull(skb, sizeof(*mld))) {
+       if (skb->len == offset + sizeof(*mld)) {
+               if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
                        err = -EINVAL;
                        goto out;
                }
@@ -1376,7 +1378,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
                if (max_delay)
                        group = &mld->mld_mca;
        } else {
-               if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+               if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
                        err = -EINVAL;
                        goto out;
                }
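
The checks above compare skb->len against offset + sizeof(header) because
skb->len still includes the bytes in front of the IGMP/MLD query, whose start
is given by skb_transport_offset().  A tiny stand-alone illustration of that
arithmetic (struct query and the 20-byte header length are assumptions made
for the example):

#include <stdio.h>

struct query {
        unsigned char  type;
        unsigned char  code;
        unsigned short csum;
        unsigned char  group[4];
};

static int is_plain_query(unsigned int pkt_len, unsigned int transport_off)
{
        return pkt_len == transport_off + sizeof(struct query);
}

int main(void)
{
        /* 20-byte IP header followed by an 8-byte query */
        printf("with offset: %d, without: %d\n",
               is_plain_query(28, 20),
               28 == (int)sizeof(struct query));
        return 0;
}
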
index 77a71cd68535fc02ae939168934fa4fb4f419644..5c925ac50b9572755e35be168cde8c858fd4ba95 100644 (file)
@@ -2802,7 +2802,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
 
        if (skb->ip_summed != CHECKSUM_NONE &&
            !can_checksum_protocol(features, type)) {
-               features &= ~NETIF_F_CSUM_MASK;
+               features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
        } else if (illegal_highdma(skb->dev, skb)) {
                features &= ~NETIF_F_SG;
        }
index 1033725be40bd8f254ce27680e3b8abd09ad1546..3937b1b68d5bc7ad50691716ac1612332a5dc997 100644 (file)
@@ -92,8 +92,11 @@ static void flow_cache_gc_task(struct work_struct *work)
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);
 
-       list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
+       list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                flow_entry_kill(fce, xfrm);
+               atomic_dec(&xfrm->flow_cache_gc_count);
+               WARN_ON(atomic_read(&xfrm->flow_cache_gc_count) < 0);
+       }
 }
 
 static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
@@ -101,6 +104,7 @@ static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     struct netns_xfrm *xfrm)
 {
        if (deleted) {
+               atomic_add(deleted, &xfrm->flow_cache_gc_count);
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
@@ -232,6 +236,13 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);
 
+               if (fcp->hash_count > 2 * fc->high_watermark ||
+                   atomic_read(&net->xfrm.flow_cache_gc_count) > fc->high_watermark) {
+                       atomic_inc(&net->xfrm.flow_cache_genid);
+                       flo = ERR_PTR(-ENOBUFS);
+                       goto ret_object;
+               }
+
                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
@@ -446,6 +457,7 @@ int flow_cache_init(struct net *net)
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);
+       atomic_set(&net->xfrm.flow_cache_gc_count, 0);
 
        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
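
The hunks above add backpressure to the flow cache: flow_cache_gc_count
tracks how many entries are queued for garbage collection, and lookups give
up with -ENOBUFS once the backlog (or the per-cpu hash) grows past the high
watermark instead of allocating even more entries.  A small userspace sketch
of that counter-based gate, with invented names and numbers:

#include <stdatomic.h>
#include <stdio.h>

#define HIGH_WATERMARK 4

static atomic_int gc_count;

static int cache_insert(void)
{
        if (atomic_load(&gc_count) > HIGH_WATERMARK)
                return -1;                      /* shed load instead of growing the cache */
        return 0;                               /* normal allocation path */
}

static void queue_for_gc(int n)
{
        atomic_fetch_add(&gc_count, n);         /* entries handed to the GC worker */
}

static void gc_worker(int n)
{
        atomic_fetch_sub(&gc_count, n);         /* decremented as entries are freed */
}

int main(void)
{
        queue_for_gc(6);
        printf("insert while GC is backed up: %d\n", cache_insert());
        gc_worker(6);
        printf("insert after GC caught up:    %d\n", cache_insert());
        return 0;
}
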
index a75f7e94b4456eed264a8d52c460f0b6bbf72569..65763c29f84549583cf55546ff9a45bbe921a598 100644 (file)
@@ -1180,14 +1180,16 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 
 static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
 {
-       struct rtnl_link_ifmap map = {
-               .mem_start   = dev->mem_start,
-               .mem_end     = dev->mem_end,
-               .base_addr   = dev->base_addr,
-               .irq         = dev->irq,
-               .dma         = dev->dma,
-               .port        = dev->if_port,
-       };
+       struct rtnl_link_ifmap map;
+
+       memset(&map, 0, sizeof(map));
+       map.mem_start   = dev->mem_start;
+       map.mem_end     = dev->mem_end;
+       map.base_addr   = dev->base_addr;
+       map.irq         = dev->irq;
+       map.dma         = dev->dma;
+       map.port        = dev->if_port;
+
        if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
                return -EMSGSIZE;
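
The change above replaces a designated initializer with an explicit memset()
because an initializer zeroes the named members but is not guaranteed to zero
the padding bytes between them, and the whole struct is copied verbatim into
a netlink attribute.  A userspace illustration of the idea (struct ifmap_like
and dump() are invented for the example):

#include <stdio.h>
#include <string.h>

struct ifmap_like {
        unsigned long long mem_start;
        unsigned char      port;        /* typically followed by 7 padding bytes */
        unsigned long long mem_end;
};

static void dump(const void *p, size_t len)
{
        const unsigned char *b = p;
        size_t i;

        for (i = 0; i < len; i++)
                printf("%02x%s", b[i], (i + 1) % 8 ? " " : "\n");
}

int main(void)
{
        struct ifmap_like map;

        memset(&map, 0, sizeof(map));   /* zeroes the padding as well */
        map.mem_start = 0x1000;
        map.port      = 3;
        map.mem_end   = 0x2000;

        dump(&map, sizeof(map));        /* every byte is now well defined */
        return 0;
}
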
 
index d97268e8ff103bd229de4fb99bbb40f51e61082d..2b68418c7198009e2477961c4b34503dc1439526 100644 (file)
@@ -975,6 +975,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
                        val = 65535 - 40;
                if (type == RTAX_MTU && val > 65535 - 15)
                        val = 65535 - 15;
+               if (type == RTAX_HOPLIMIT && val > 255)
+                       val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        return -EINVAL;
                fi->fib_metrics[type - 1] = val;
index a39068b4a4d99383096c3c970c4c15f1ee464617..a6962ccad98a5f391ac4d403032303ed1f3cbabb 100644 (file)
@@ -228,8 +228,6 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff,
        int err = -ENOSYS;
        const struct net_offload **offloads;
 
-       udp_tunnel_gro_complete(skb, nhoff);
-
        rcu_read_lock();
        offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
        ops = rcu_dereference(offloads[proto]);
@@ -238,6 +236,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff,
 
        err = ops->callbacks.gro_complete(skb, nhoff);
 
+       skb_set_inner_mac_header(skb, nhoff);
+
 out_unlock:
        rcu_read_unlock();
 
@@ -414,6 +414,8 @@ static int gue_gro_complete(struct sk_buff *skb, int nhoff,
 
        err = ops->callbacks.gro_complete(skb, nhoff + guehlen);
 
+       skb_set_inner_mac_header(skb, nhoff + guehlen);
+
 out_unlock:
        rcu_read_unlock();
        return err;
index bc68eced0105716dca6f04abbdd491a1cfb9e908..0d9e9d7bb029373e37e4de6ed4003397d41f82cd 100644 (file)
@@ -470,6 +470,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                                                     const struct sock *sk2,
                                                     bool match_wildcard))
 {
+       struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
        struct sock *sk2;
        struct hlist_nulls_node *node;
        kuid_t uid = sock_i_uid(sk);
@@ -479,6 +480,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
                    sk2->sk_family == sk->sk_family &&
                    ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
                    sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+                   inet_csk(sk2)->icsk_bind_hash == tb &&
                    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
                    saddr_same(sk, sk2, false))
                        return reuseport_add_sock(sk, sk2);
index af5d1f38217f4e4dcb977b6410d0d9a6a6c1e87c..4cc84212cce149e4a089e5f986f1a596596744cc 100644 (file)
@@ -179,6 +179,7 @@ static __be16 tnl_flags_to_gre_flags(__be16 tflags)
        return flags;
 }
 
+/* Fills in tpi and returns header length to be pulled. */
 static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                            bool *csum_err)
 {
@@ -238,7 +239,7 @@ static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
                                return -EINVAL;
                }
        }
-       return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
+       return hdr_len;
 }
 
 static void ipgre_err(struct sk_buff *skb, u32 info,
@@ -341,7 +342,7 @@ static void gre_err(struct sk_buff *skb, u32 info)
        struct tnl_ptk_info tpi;
        bool csum_err = false;
 
-       if (parse_gre_header(skb, &tpi, &csum_err)) {
+       if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
                if (!csum_err)          /* ignore csum errors. */
                        return;
        }
@@ -397,7 +398,10 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
                                  iph->saddr, iph->daddr, tpi->key);
 
        if (tunnel) {
-               skb_pop_mac_header(skb);
+               if (tunnel->dev->type != ARPHRD_NONE)
+                       skb_pop_mac_header(skb);
+               else
+                       skb_reset_mac_header(skb);
                if (tunnel->collect_md) {
                        __be16 flags;
                        __be64 tun_id;
@@ -419,6 +423,7 @@ static int gre_rcv(struct sk_buff *skb)
 {
        struct tnl_ptk_info tpi;
        bool csum_err = false;
+       int hdr_len;
 
 #ifdef CONFIG_NET_IPGRE_BROADCAST
        if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
@@ -428,7 +433,10 @@ static int gre_rcv(struct sk_buff *skb)
        }
 #endif
 
-       if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+       hdr_len = parse_gre_header(skb, &tpi, &csum_err);
+       if (hdr_len < 0)
+               goto drop;
+       if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false) < 0)
                goto drop;
 
        if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
@@ -523,7 +531,8 @@ static struct rtable *gre_get_rt(struct sk_buff *skb,
        return ip_route_output_key(net, fl);
 }
 
-static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
+static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
+                       __be16 proto)
 {
        struct ip_tunnel_info *tun_info;
        const struct ip_tunnel_key *key;
@@ -575,7 +584,7 @@ static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        flags = tun_info->key.tun_flags & (TUNNEL_CSUM | TUNNEL_KEY);
-       build_header(skb, tunnel_hlen, flags, htons(ETH_P_TEB),
+       build_header(skb, tunnel_hlen, flags, proto,
                     tunnel_id_to_key(tun_info->key.tun_id), 0);
 
        df = key->tun_flags & TUNNEL_DONT_FRAGMENT ?  htons(IP_DF) : 0;
@@ -616,7 +625,7 @@ static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
        const struct iphdr *tnl_params;
 
        if (tunnel->collect_md) {
-               gre_fb_xmit(skb, dev);
+               gre_fb_xmit(skb, dev, skb->protocol);
                return NETDEV_TX_OK;
        }
 
@@ -660,7 +669,7 @@ static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
        struct ip_tunnel *tunnel = netdev_priv(dev);
 
        if (tunnel->collect_md) {
-               gre_fb_xmit(skb, dev);
+               gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
                return NETDEV_TX_OK;
        }
 
@@ -893,7 +902,7 @@ static int ipgre_tunnel_init(struct net_device *dev)
        netif_keep_dst(dev);
        dev->addr_len           = 4;
 
-       if (iph->daddr) {
+       if (iph->daddr && !tunnel->collect_md) {
 #ifdef CONFIG_NET_IPGRE_BROADCAST
                if (ipv4_is_multicast(iph->daddr)) {
                        if (!iph->saddr)
@@ -902,8 +911,9 @@ static int ipgre_tunnel_init(struct net_device *dev)
                        dev->header_ops = &ipgre_header_ops;
                }
 #endif
-       } else
+       } else if (!tunnel->collect_md) {
                dev->header_ops = &ipgre_header_ops;
+       }
 
        return ip_tunnel_init(dev);
 }
@@ -946,6 +956,11 @@ static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
        if (flags & (GRE_VERSION|GRE_ROUTING))
                return -EINVAL;
 
+       if (data[IFLA_GRE_COLLECT_METADATA] &&
+           data[IFLA_GRE_ENCAP_TYPE] &&
+           nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
+               return -EINVAL;
+
        return 0;
 }
 
@@ -1019,6 +1034,8 @@ static void ipgre_netlink_parms(struct net_device *dev,
                struct ip_tunnel *t = netdev_priv(dev);
 
                t->collect_md = true;
+               if (dev->type == ARPHRD_IPGRE)
+                       dev->type = ARPHRD_NONE;
        }
 }
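
parse_gre_header() above now only validates the header and reports its
length; whether those bytes are actually pulled is left to the caller, so the
error path can inspect the headers without consuming them while the receive
path still pulls them before handing the packet on.  A stand-alone sketch of
that parse/consume split (parse_hdr() and the 4-byte base header are
assumptions for the example):

#include <stdio.h>

static int parse_hdr(const unsigned char *pkt, unsigned int len)
{
        unsigned int hdr_len = 4;               /* base header */

        if (len < hdr_len)
                return -1;
        if (pkt[0] & 0x80)                      /* checksum flag set */
                hdr_len += 4;
        return len < hdr_len ? -1 : (int)hdr_len;
}

int main(void)
{
        unsigned char pkt[8] = { 0x80, 0 };     /* header with checksum present */
        int hdr_len = parse_hdr(pkt, sizeof(pkt));

        if (hdr_len < 0)
                return 1;

        /* error path: stop here, nothing consumed;
         * receive path: advance by hdr_len before further processing
         */
        printf("header is %d bytes, payload starts at offset %d\n",
               hdr_len, hdr_len);
        return 0;
}
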
 
index 6aad0192443d49966785f0de67ee30934775dfca..a69ed94bda1b107634f0aacb6dbedb3d03cc87b0 100644 (file)
@@ -326,12 +326,12 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
                if (!IS_ERR(rt)) {
                        tdev = rt->dst.dev;
-                       dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
-                                         fl4.saddr);
                        ip_rt_put(rt);
                }
                if (dev->type != ARPHRD_ETHER)
                        dev->flags |= IFF_POINTOPOINT;
+
+               dst_cache_reset(&tunnel->dst_cache);
        }
 
        if (!tdev && tunnel->parms.link)
index 5cf10b777b7e4bb8b1037201a2c8bfeeb031c7cc..a917903d5e9742fb07bac1b2a7fa94ee069c0d54 100644 (file)
@@ -156,6 +156,7 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
        struct dst_entry *dst = skb_dst(skb);
        struct net_device *tdev;        /* Device to other host */
        int err;
+       int mtu;
 
        if (!dst) {
                dev->stats.tx_carrier_errors++;
@@ -192,6 +193,23 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
                        tunnel->err_count = 0;
        }
 
+       mtu = dst_mtu(dst);
+       if (skb->len > mtu) {
+               skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+               } else {
+                       if (mtu < IPV6_MIN_MTU)
+                               mtu = IPV6_MIN_MTU;
+
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               }
+
+               dst_release(dst);
+               goto tx_error;
+       }
+
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
index 441ae9da3a233fd96385853197fd3938959449e3..79a03b87a7714b1ade63aa78ce80101a182e2597 100644 (file)
@@ -2640,8 +2640,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         */
        if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) ||
                     skb_headroom(skb) >= 0xFFFF)) {
-               struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
-                                                  GFP_ATOMIC);
+               struct sk_buff *nskb;
+
+               skb_mstamp_get(&skb->skb_mstamp);
+               nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
                err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
                             -ENOBUFS;
        } else {
index 0ed2dafb7cc405e1003f6044d42701abd3969d63..e330c0e56b118f47e125c1459ffb98e9c8998d77 100644 (file)
@@ -399,6 +399,11 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
 
        uh->len = newlen;
 
+       /* Set encapsulation before calling into inner gro_complete() functions
+        * to make them set up the inner offsets.
+        */
+       skb->encapsulation = 1;
+
        rcu_read_lock();
 
        uo_priv = rcu_dereference(udp_offload_base);
@@ -421,9 +426,6 @@ int udp_gro_complete(struct sk_buff *skb, int nhoff)
        if (skb->remcsum_offload)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;
 
-       skb->encapsulation = 1;
-       skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr));
-
        return err;
 }
 
index 0a37ddc7af51579f56b644ba0e4c3c3a7a2e2bc7..0013cacf7164c6eec1816b8bd0d08ca01dc7e81d 100644 (file)
@@ -445,6 +445,8 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
 
        if (__ipv6_addr_needs_scope_id(addr_type))
                iif = skb->dev->ifindex;
+       else
+               iif = l3mdev_master_ifindex(skb->dev);
 
        /*
         *      Must not send error if the source does not uniquely
@@ -499,9 +501,6 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
        else if (!fl6.flowi6_oif)
                fl6.flowi6_oif = np->ucast_oif;
 
-       if (!fl6.flowi6_oif)
-               fl6.flowi6_oif = l3mdev_master_ifindex(skb->dev);
-
        dst = icmpv6_route_lookup(net, skb, sk, &fl6);
        if (IS_ERR(dst))
                goto out;
index 2ae3c4fd8aabc65a7206a73480b3dca4b975f7e3..41f18de5dcc2cda4686af5fe89c8ab4760966d91 100644 (file)
@@ -120,8 +120,7 @@ nla_put_failure:
 
 static int ila_encap_nlsize(struct lwtunnel_state *lwtstate)
 {
-       /* No encapsulation overhead */
-       return 0;
+       return nla_total_size(sizeof(u64)); /* ILA_ATTR_LOCATOR */
 }
 
 static int ila_encap_cmp(struct lwtunnel_state *a, struct lwtunnel_state *b)
index d916d6ab9ad29aeaeb4bfc57276549f3ae839f40..6f32944e0223fc3e83019ae9ff4fc5cade2cefef 100644 (file)
@@ -1750,6 +1750,8 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
                } else {
                        val = nla_get_u32(nla);
                }
+               if (type == RTAX_HOPLIMIT && val > 255)
+                       val = 255;
                if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
                        goto err;
 
index 711d209f912473b28eddf3eb7f83a51d568d9f4d..f443c6b0ce162766ba83ceae48a7e25791161017 100644 (file)
@@ -810,8 +810,13 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
        fl6.flowi6_proto = IPPROTO_TCP;
        if (rt6_need_strict(&fl6.daddr) && !oif)
                fl6.flowi6_oif = tcp_v6_iif(skb);
-       else
+       else {
+               if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
+                       oif = skb->skb_iif;
+
                fl6.flowi6_oif = oif;
+       }
+
        fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
        fl6.fl6_dport = t1->dest;
        fl6.fl6_sport = t1->source;
index afca2eb4dfa777c75288dfb6fce9636b309a2ebc..6edfa99803148815e383eb13ce6a9c1eb098058d 100644 (file)
@@ -1376,9 +1376,9 @@ static int l2tp_tunnel_sock_create(struct net *net,
                        memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
                               sizeof(udp_conf.peer_ip6));
                        udp_conf.use_udp6_tx_checksums =
-                           cfg->udp6_zero_tx_checksums;
+                         ! cfg->udp6_zero_tx_checksums;
                        udp_conf.use_udp6_rx_checksums =
-                           cfg->udp6_zero_rx_checksums;
+                         ! cfg->udp6_zero_rx_checksums;
                } else
 #endif
                {
index b3c52e3f689ad16468eb603f1a5d11c55f3d7ae5..8ae3ed97d95cb4a5f05b69b8c2922d57072a6fd1 100644 (file)
@@ -626,6 +626,7 @@ static void llc_cmsg_rcv(struct msghdr *msg, struct sk_buff *skb)
        if (llc->cmsg_flags & LLC_CMSG_PKTINFO) {
                struct llc_pktinfo info;
 
+               memset(&info, 0, sizeof(info));
                info.lpi_ifindex = llc_sk(skb->sk)->dev->ifindex;
                llc_pdu_decode_dsap(skb, &info.lpi_sap);
                llc_pdu_decode_da(skb, info.lpi_mac);
index 453b4e7417804105cb136e2ec7f4c57a39392db3..e1cb22c16530377cc8ce0d9bf907bbd662f18337 100644 (file)
@@ -1761,7 +1761,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = dev_alloc_name(ndev, ndev->name);
                if (ret < 0) {
-                       free_netdev(ndev);
+                       ieee80211_if_free(ndev);
                        return ret;
                }
 
@@ -1847,7 +1847,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 
                ret = register_netdevice(ndev);
                if (ret) {
-                       free_netdev(ndev);
+                       ieee80211_if_free(ndev);
                        return ret;
                }
        }
index afde5f5e728a320773be246ecbcfcca7f5a16617..e27fd17c6743b060b1167ab762b61a8e2a728589 100644 (file)
@@ -66,7 +66,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks);
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
 EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);
 
-static __read_mostly spinlock_t nf_conntrack_locks_all_lock;
+static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
 static __read_mostly bool nf_conntrack_locks_all;
 
 void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
@@ -1778,6 +1778,7 @@ void nf_conntrack_init_end(void)
 
 int nf_conntrack_init_net(struct net *net)
 {
+       static atomic64_t unique_id;
        int ret = -ENOMEM;
        int cpu;
 
@@ -1800,7 +1801,8 @@ int nf_conntrack_init_net(struct net *net)
        if (!net->ct.stat)
                goto err_pcpu_lists;
 
-       net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
+       net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%llu",
+                               (u64)atomic64_inc_return(&unique_id));
        if (!net->ct.slabname)
                goto err_slabname;
 
index 4c2b4c0c4d5fa4ac209ab85020e97e14ed716ab9..dbd0803b18273bbf3b37e7f0f5bfae87d5acfc2d 100644 (file)
@@ -96,6 +96,8 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl,
                        return -EINVAL;
                if (flags & NFACCT_F_OVERQUOTA)
                        return -EINVAL;
+               if ((flags & NFACCT_F_QUOTA) && !tb[NFACCT_QUOTA])
+                       return -EINVAL;
 
                size += sizeof(u64);
        }
index 29d2c31f406ca585d5f0eb1f08bcaf26d8364053..daf45da448fab4406cf4b5727404c88c1f0759be 100644 (file)
@@ -236,6 +236,7 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
 
                list_del(&info->timer->entry);
                del_timer_sync(&info->timer->timer);
+               cancel_work_sync(&info->timer->work);
                sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
                kfree(info->timer->attr.attr.name);
                kfree(info->timer);
index b5fea1101faaa52162d438a8cb5e91d3a83ec8bd..10c84d8828816ca88e6ee3b953f9e345465600c1 100644 (file)
@@ -776,6 +776,19 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key,
                        return -EINVAL;
                }
 
+               /* Userspace may decide to perform a ct lookup without a helper
+                * specified followed by a (recirculate and) commit with one.
+                * Therefore, for unconfirmed connections which we will commit,
+                * we need to attach the helper here.
+                */
+               if (!nf_ct_is_confirmed(ct) && info->commit &&
+                   info->helper && !nfct_help(ct)) {
+                       int err = __nf_ct_try_assign_helper(ct, info->ct,
+                                                           GFP_ATOMIC);
+                       if (err)
+                               return err;
+               }
+
                /* Call the helper only if:
                 * - nf_conntrack_in() was executed above ("!cached") for a
                 *   confirmed connection, or
index 61ed2a8764ba4c7a66788d7843cef18abb9e904a..86187dad14403100ff6199ee79beed5262de818f 100644 (file)
@@ -127,7 +127,7 @@ void rds_tcp_restore_callbacks(struct socket *sock,
 
 /*
  * This is the only path that sets tc->t_sock.  Send and receive trust that
- * it is set.  The RDS_CONN_CONNECTED bit protects those paths from being
+ * it is set.  The RDS_CONN_UP bit protects those paths from being
  * called while it isn't set.
  */
 void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
@@ -216,6 +216,7 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
        if (!tc)
                return -ENOMEM;
 
+       mutex_init(&tc->t_conn_lock);
        tc->t_sock = NULL;
        tc->t_tinc = NULL;
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
index 64f873c0c6b6d15574ec9771506d04e617a6aaac..41c228300525c029df02472b609ada1a71cea98e 100644 (file)
@@ -12,6 +12,10 @@ struct rds_tcp_connection {
 
        struct list_head        t_tcp_node;
        struct rds_connection   *conn;
+       /* t_conn_lock synchronizes the connection establishment between
+        * rds_tcp_accept_one and rds_tcp_conn_connect
+        */
+       struct mutex            t_conn_lock;
        struct socket           *t_sock;
        void                    *t_orig_write_space;
        void                    *t_orig_data_ready;
index 5cb16875c4603dba71c733de6600ada40e39cffc..49a3fcfed360edfb146416976c700b24e4520864 100644 (file)
@@ -78,7 +78,14 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        struct socket *sock = NULL;
        struct sockaddr_in src, dest;
        int ret;
+       struct rds_tcp_connection *tc = conn->c_transport_data;
+
+       mutex_lock(&tc->t_conn_lock);
 
+       if (rds_conn_up(conn)) {
+               mutex_unlock(&tc->t_conn_lock);
+               return 0;
+       }
        ret = sock_create_kern(rds_conn_net(conn), PF_INET,
                               SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret < 0)
@@ -120,6 +127,7 @@ int rds_tcp_conn_connect(struct rds_connection *conn)
        }
 
 out:
+       mutex_unlock(&tc->t_conn_lock);
        if (sock)
                sock_release(sock);
        return ret;
index 0936a4a32b476fdde5c7208fc465ec3324bbcf09..be263cdf268bae5b59a4746d4011d1042ede58cd 100644 (file)
@@ -76,7 +76,9 @@ int rds_tcp_accept_one(struct socket *sock)
        struct rds_connection *conn;
        int ret;
        struct inet_sock *inet;
-       struct rds_tcp_connection *rs_tcp;
+       struct rds_tcp_connection *rs_tcp = NULL;
+       int conn_state;
+       struct sock *nsk;
 
        ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
                               sock->sk->sk_type, sock->sk->sk_protocol,
@@ -115,28 +117,44 @@ int rds_tcp_accept_one(struct socket *sock)
         * rds_tcp_state_change() will do that cleanup
         */
        rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
-       if (rs_tcp->t_sock &&
-           ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
-               struct sock *nsk = new_sock->sk;
-
-               nsk->sk_user_data = NULL;
-               nsk->sk_prot->disconnect(nsk, 0);
-               tcp_done(nsk);
-               new_sock = NULL;
-               ret = 0;
-               goto out;
-       } else if (rs_tcp->t_sock) {
-               rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
-               conn->c_outgoing = 0;
-       }
-
        rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING);
+       mutex_lock(&rs_tcp->t_conn_lock);
+       conn_state = rds_conn_state(conn);
+       if (conn_state != RDS_CONN_CONNECTING && conn_state != RDS_CONN_UP)
+               goto rst_nsk;
+       if (rs_tcp->t_sock) {
+               /* Need to resolve a duelling SYN between peers.
+                * We have an outstanding SYN to this peer, which may
+                * potentially have transitioned to the RDS_CONN_UP state,
+                * so we must quiesce any send threads before resetting
+                * c_transport_data.
+                */
+               wait_event(conn->c_waitq,
+                          !test_bit(RDS_IN_XMIT, &conn->c_flags));
+               if (ntohl(inet->inet_saddr) < ntohl(inet->inet_daddr)) {
+                       goto rst_nsk;
+               } else if (rs_tcp->t_sock) {
+                       rds_tcp_restore_callbacks(rs_tcp->t_sock, rs_tcp);
+                       conn->c_outgoing = 0;
+               }
+       }
        rds_tcp_set_callbacks(new_sock, conn);
-       rds_connect_complete(conn);
+       rds_connect_complete(conn); /* marks RDS_CONN_UP */
+       new_sock = NULL;
+       ret = 0;
+       goto out;
+rst_nsk:
+       /* reset the newly returned accept sock and bail */
+       nsk = new_sock->sk;
+       rds_tcp_stats_inc(s_tcp_listen_closed_stale);
+       nsk->sk_user_data = NULL;
+       nsk->sk_prot->disconnect(nsk, 0);
+       tcp_done(nsk);
        new_sock = NULL;
        ret = 0;
-
 out:
+       if (rs_tcp)
+               mutex_unlock(&rs_tcp->t_conn_lock);
        if (new_sock)
                sock_release(new_sock);
        return ret;
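
The new t_conn_lock above serializes the active connect path and the passive
accept path: both take the per-connection mutex and re-check the connection
state, so only one of them finishes bringing the connection up while the
other backs off.  A deliberately simplified userspace sketch of that
serialization (struct conn, connect_side() and accept_side() are invented;
the dueling-SYN handling in the real patch is omitted):

#include <pthread.h>
#include <stdio.h>

enum conn_state { CONN_DOWN, CONN_CONNECTING, CONN_UP };

struct conn {
        pthread_mutex_t lock;
        enum conn_state state;
};

static int connect_side(struct conn *c)
{
        pthread_mutex_lock(&c->lock);
        if (c->state == CONN_UP) {              /* the rds_conn_up() check: peer won */
                pthread_mutex_unlock(&c->lock);
                return 0;
        }
        c->state = CONN_UP;                     /* socket setup would happen here */
        pthread_mutex_unlock(&c->lock);
        return 1;
}

static int accept_side(struct conn *c)
{
        pthread_mutex_lock(&c->lock);
        c->state = CONN_UP;                     /* callbacks installed, connection marked up */
        pthread_mutex_unlock(&c->lock);
        return 1;
}

int main(void)
{
        struct conn c = { PTHREAD_MUTEX_INITIALIZER, CONN_DOWN };

        accept_side(&c);
        printf("connect after accept completed: %d\n", connect_side(&c));
        return 0;
}
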
index c589a9ba506af8ba1376f48162ec8224281a48a0..343d011aa81849240869bd95bac501895d6fb83e 100644 (file)
@@ -423,7 +423,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        u16 ife_type = 0;
        u8 *daddr = NULL;
        u8 *saddr = NULL;
-       int ret = 0;
+       int ret = 0, exists = 0;
        int err;
 
        err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy);
@@ -435,25 +435,29 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
 
        parm = nla_data(tb[TCA_IFE_PARMS]);
 
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
        if (parm->flags & IFE_ENCODE) {
                /* Until we get issued the ethertype, we can't have
                 * a default..
                **/
                if (!tb[TCA_IFE_TYPE]) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        pr_info("You MUST pass ethertype for encoding\n");
                        return -EINVAL;
                }
        }
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a, sizeof(*ife),
                                      bind, false);
                if (ret)
                        return ret;
                ret = ACT_P_CREATED;
        } else {
-               if (bind)       /* dont override defaults */
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
@@ -495,6 +499,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
                                       NULL);
                if (err) {
 metadata_parse_err:
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        if (ret == ACT_P_CREATED)
                                _tcf_ife_cleanup(a, bind);
 
index 350e134cffb32b04f3e4c2b4b3917051cd55b456..8b5270008a6e5f723c6893a1fef1ad4a3de1af68 100644 (file)
@@ -96,7 +96,7 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        struct tcf_ipt *ipt;
        struct xt_entry_target *td, *t;
        char *tname;
-       int ret = 0, err;
+       int ret = 0, err, exists = 0;
        u32 hook = 0;
        u32 index = 0;
 
@@ -107,18 +107,23 @@ static int __tcf_ipt_init(struct tc_action_net *tn, struct nlattr *nla,
        if (err < 0)
                return err;
 
-       if (tb[TCA_IPT_HOOK] == NULL)
-               return -EINVAL;
-       if (tb[TCA_IPT_TARG] == NULL)
+       if (tb[TCA_IPT_INDEX] != NULL)
+               index = nla_get_u32(tb[TCA_IPT_INDEX]);
+
+       exists = tcf_hash_check(tn, index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (tb[TCA_IPT_HOOK] == NULL || tb[TCA_IPT_TARG] == NULL) {
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
+       }
 
        td = (struct xt_entry_target *)nla_data(tb[TCA_IPT_TARG]);
        if (nla_len(tb[TCA_IPT_TARG]) < td->u.target_size)
                return -EINVAL;
 
-       if (tb[TCA_IPT_INDEX] != NULL)
-               index = nla_get_u32(tb[TCA_IPT_INDEX]);
-
        if (!tcf_hash_check(tn, index, a, bind)) {
                ret = tcf_hash_create(tn, index, est, a, sizeof(*ipt), bind,
                                      false);
index e8a760cf7775ea1e3c9522376c3e3111a54a2ba6..8f3948dd38b85f70e654e317280db09e66c23f5c 100644 (file)
@@ -61,7 +61,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        struct tc_mirred *parm;
        struct tcf_mirred *m;
        struct net_device *dev;
-       int ret, ok_push = 0;
+       int ret, ok_push = 0, exists = 0;
 
        if (nla == NULL)
                return -EINVAL;
@@ -71,17 +71,27 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_MIRRED_PARMS] == NULL)
                return -EINVAL;
        parm = nla_data(tb[TCA_MIRRED_PARMS]);
+
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
        switch (parm->eaction) {
        case TCA_EGRESS_MIRROR:
        case TCA_EGRESS_REDIR:
                break;
        default:
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
        }
        if (parm->ifindex) {
                dev = __dev_get_by_index(net, parm->ifindex);
-               if (dev == NULL)
+               if (dev == NULL) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        return -ENODEV;
+               }
                switch (dev->type) {
                case ARPHRD_TUNNEL:
                case ARPHRD_TUNNEL6:
@@ -99,7 +109,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                dev = NULL;
        }
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                if (dev == NULL)
                        return -EINVAL;
                ret = tcf_hash_create(tn, parm->index, est, a,
@@ -108,9 +118,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        return ret;
                ret = ACT_P_CREATED;
        } else {
-               if (bind)
-                       return 0;
-
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index 75b2be13fbcc452da0a1379dd693fd9693b75e4d..3a33fb648a6d48db38627b66b1f54e0ab54bfcc7 100644 (file)
@@ -87,7 +87,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        struct tc_defact *parm;
        struct tcf_defact *d;
        char *defdata;
-       int ret = 0, err;
+       int ret = 0, err, exists = 0;
 
        if (nla == NULL)
                return -EINVAL;
@@ -99,13 +99,21 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_DEF_PARMS] == NULL)
                return -EINVAL;
 
-       if (tb[TCA_DEF_DATA] == NULL)
-               return -EINVAL;
 
        parm = nla_data(tb[TCA_DEF_PARMS]);
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (tb[TCA_DEF_DATA] == NULL) {
+               if (exists)
+                       tcf_hash_release(a, bind);
+               return -EINVAL;
+       }
+
        defdata = nla_data(tb[TCA_DEF_DATA]);
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a,
                                      sizeof(*d), bind, false);
                if (ret)
@@ -122,8 +130,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
        } else {
                d = to_defact(a);
 
-               if (bind)
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index cfcdbdc00c9bfef4702ae09875080cc0b3df11ea..69da5a8f0034eb8851a124863cf876324862138d 100644 (file)
@@ -69,7 +69,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL;
        u16 *queue_mapping = NULL;
-       int ret = 0, err;
+       int ret = 0, err, exists = 0;
 
        if (nla == NULL)
                return -EINVAL;
@@ -96,12 +96,18 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                mark = nla_data(tb[TCA_SKBEDIT_MARK]);
        }
 
-       if (!flags)
-               return -EINVAL;
-
        parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
+       if (!flags) {
+               tcf_hash_release(a, bind);
+               return -EINVAL;
+       }
+
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a,
                                      sizeof(*d), bind, false);
                if (ret)
@@ -111,8 +117,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                ret = ACT_P_CREATED;
        } else {
                d = to_skbedit(a);
-               if (bind)
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index bab8ae0cefc08800678415b163921c67a0e50e4e..c45f926dafb9637ebef79765ec59f86094220930 100644 (file)
@@ -77,7 +77,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        int action;
        __be16 push_vid = 0;
        __be16 push_proto = 0;
-       int ret = 0;
+       int ret = 0, exists = 0;
        int err;
 
        if (!nla)
@@ -90,15 +90,25 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        if (!tb[TCA_VLAN_PARMS])
                return -EINVAL;
        parm = nla_data(tb[TCA_VLAN_PARMS]);
+       exists = tcf_hash_check(tn, parm->index, a, bind);
+       if (exists && bind)
+               return 0;
+
        switch (parm->v_action) {
        case TCA_VLAN_ACT_POP:
                break;
        case TCA_VLAN_ACT_PUSH:
-               if (!tb[TCA_VLAN_PUSH_VLAN_ID])
+               if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        return -EINVAL;
+               }
                push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
-               if (push_vid >= VLAN_VID_MASK)
+               if (push_vid >= VLAN_VID_MASK) {
+                       if (exists)
+                               tcf_hash_release(a, bind);
                        return -ERANGE;
+               }
 
                if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
                        push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
@@ -114,11 +124,13 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                }
                break;
        default:
+               if (exists)
+                       tcf_hash_release(a, bind);
                return -EINVAL;
        }
        action = parm->v_action;
 
-       if (!tcf_hash_check(tn, parm->index, a, bind)) {
+       if (!exists) {
                ret = tcf_hash_create(tn, parm->index, est, a,
                                      sizeof(*v), bind, false);
                if (ret)
@@ -126,8 +138,6 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
 
                ret = ACT_P_CREATED;
        } else {
-               if (bind)
-                       return 0;
                tcf_hash_release(a, bind);
                if (!ovr)
                        return -EEXIST;
index 9640bb39a5d293d55a96edc5164d369c1cded127..4befe97a90349832030e139125369c048cb1d67c 100644 (file)
@@ -395,6 +395,25 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
        sch->q.qlen++;
 }
 
+/* netem can't properly corrupt a megapacket (like we get from GSO), so
+ * when we statistically choose to corrupt one, we instead segment it,
+ * returning the first packet to be corrupted and re-enqueueing the remaining frames.
+ */
+static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch)
+{
+       struct sk_buff *segs;
+       netdev_features_t features = netif_skb_features(skb);
+
+       segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+
+       if (IS_ERR_OR_NULL(segs)) {
+               qdisc_reshape_fail(skb, sch);
+               return NULL;
+       }
+       consume_skb(skb);
+       return segs;
+}
+
 /*
  * Insert one skb into qdisc.
  * Note: parent depends on return value to account for queue length.
@@ -407,7 +426,11 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
        /* We don't fill cb now as skb_unshare() may invalidate it */
        struct netem_skb_cb *cb;
        struct sk_buff *skb2;
+       struct sk_buff *segs = NULL;
+       unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
+       int nb = 0;
        int count = 1;
+       int rc = NET_XMIT_SUCCESS;
 
        /* Random duplication */
        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
@@ -453,10 +476,23 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         * do it now in software before we mangle it.
         */
        if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
+               if (skb_is_gso(skb)) {
+                       segs = netem_segment(skb, sch);
+                       if (!segs)
+                               return NET_XMIT_DROP;
+               } else {
+                       segs = skb;
+               }
+
+               skb = segs;
+               segs = segs->next;
+
                if (!(skb = skb_unshare(skb, GFP_ATOMIC)) ||
                    (skb->ip_summed == CHECKSUM_PARTIAL &&
-                    skb_checksum_help(skb)))
-                       return qdisc_drop(skb, sch);
+                    skb_checksum_help(skb))) {
+                       rc = qdisc_drop(skb, sch);
+                       goto finish_segs;
+               }
 
                skb->data[prandom_u32() % skb_headlen(skb)] ^=
                        1<<(prandom_u32() % 8);
@@ -516,6 +552,27 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                sch->qstats.requeues++;
        }
 
+finish_segs:
+       if (segs) {
+               while (segs) {
+                       skb2 = segs->next;
+                       segs->next = NULL;
+                       qdisc_skb_cb(segs)->pkt_len = segs->len;
+                       last_len = segs->len;
+                       rc = qdisc_enqueue(segs, sch);
+                       if (rc != NET_XMIT_SUCCESS) {
+                               if (net_xmit_drop_count(rc))
+                                       qdisc_qstats_drop(sch);
+                       } else {
+                               nb++;
+                               len += last_len;
+                       }
+                       segs = skb2;
+               }
+               sch->q.qlen += nb;
+               if (nb > 1)
+                       qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
+       }
        return NET_XMIT_SUCCESS;
 }
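
The comment and code added above deal with corruption of GSO "megapackets":
the packet is segmented first, only the first resulting segment is corrupted,
the rest are re-enqueued, and the queue statistics are adjusted for the extra
packets.  A stand-alone sketch of that segment-then-corrupt idea (MTU, struct
seg and enqueue() are invented for the example; no real GSO is involved):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MTU 4

struct seg {
        size_t len;
        unsigned char data[MTU];
};

static void enqueue(const struct seg *s)
{
        printf("enqueued %zu byte(s), first byte %02x\n", s->len, s->data[0]);
}

int main(void)
{
        unsigned char big[10];                  /* the "megapacket" */
        struct seg segs[8];
        size_t off = 0, nsegs = 0;

        memset(big, 0xaa, sizeof(big));

        while (off < sizeof(big)) {             /* ~ skb_gso_segment() */
                size_t n = sizeof(big) - off < MTU ? sizeof(big) - off : MTU;

                segs[nsegs].len = n;
                memcpy(segs[nsegs].data, big + off, n);
                off += n;
                nsegs++;
        }

        /* corrupt a random bit of the first segment only */
        segs[0].data[rand() % segs[0].len] ^= 1 << (rand() % 8);

        for (size_t i = 0; i < nsegs; i++)
                enqueue(&segs[i]);

        printf("queue length grew by %zu packets instead of 1\n", nsegs);
        return 0;
}
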
 
index ace178fd385038523bc498222e9497245ccae4b0..9aaa1bc566ae3ec59d03a1ef3c7872ab86a71c0d 100644 (file)
@@ -1444,6 +1444,7 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
        int bearer_id = b->identity;
        struct tipc_link_entry *le;
        u16 bc_ack = msg_bcast_ack(hdr);
+       u32 self = tipc_own_addr(net);
        int rc = 0;
 
        __skb_queue_head_init(&xmitq);
@@ -1460,6 +1461,10 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
                        return tipc_node_bc_rcv(net, skb, bearer_id);
        }
 
+       /* Discard unicast link messages destined for another node */
+       if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+               goto discard;
+
        /* Locate neighboring node that sent packet */
        n = tipc_node_find(net, msg_prevnode(hdr));
        if (unlikely(!n))
index 3dce53ebea9240fd2f46f9c428170a2e6c345d7b..b5f1221f48d4859156aa640066e1fd80cf2927dc 100644 (file)
@@ -1808,27 +1808,8 @@ vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        else if (sk->sk_shutdown & RCV_SHUTDOWN)
                err = 0;
 
-       if (copied > 0) {
-               /* We only do these additional bookkeeping/notification steps
-                * if we actually copied something out of the queue pair
-                * instead of just peeking ahead.
-                */
-
-               if (!(flags & MSG_PEEK)) {
-                       /* If the other side has shutdown for sending and there
-                        * is nothing more to read, then modify the socket
-                        * state.
-                        */
-                       if (vsk->peer_shutdown & SEND_SHUTDOWN) {
-                               if (vsock_stream_has_data(vsk) <= 0) {
-                                       sk->sk_state = SS_UNCONNECTED;
-                                       sock_set_flag(sk, SOCK_DONE);
-                                       sk->sk_state_change(sk);
-                               }
-                       }
-               }
+       if (copied > 0)
                err = copied;
-       }
 
 out:
        release_sock(sk);
index 7ecd04c21360994627f7b9ec0231bf454510fd46..997ff7b2509b49a3da6d3183fe65512f8f4caa38 100644 (file)
@@ -277,6 +277,7 @@ int x25_negotiate_facilities(struct sk_buff *skb, struct sock *sk,
 
        memset(&theirs, 0, sizeof(theirs));
        memcpy(new, ours, sizeof(*new));
+       memset(dte, 0, sizeof(*dte));
 
        len = x25_parse_facilities(skb, &theirs, dte, &x25->vc_facil_mask);
        if (len < 0)
index ff4a91fcab9fd291b09eb48487690301c4388ae2..637387bbaaea33f62a1a970c9a361c895c4e5f2d 100644 (file)
@@ -99,6 +99,9 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
 
                skb_dst_force(skb);
 
+               /* Inner headers are invalid now. */
+               skb->encapsulation = 0;
+
                err = x->type->output(x, skb);
                if (err == -EINPROGRESS)
                        goto out;
index 8d8d1ec429eb6761b0d29b30299536ac782779c5..9b96f4fb8cea64f9b98c835b045bd8c840719433 100644 (file)
@@ -18,7 +18,6 @@ int bpf_prog1(struct pt_regs *ctx)
                u64 cookie;
        } data;
 
-       memset(&data, 0, sizeof(data));
        data.pid = bpf_get_current_pid_tgid();
        data.cookie = 0x12345678;
 
index 161dd0d67da8a84f0062672cd9125c491a2f58e3..a9155077feefb957d38dc29b4823a9529a7ee0e9 100644 (file)
@@ -371,6 +371,49 @@ static void do_usb_table(void *symval, unsigned long size,
                do_usb_entry_multi(symval + i, mod);
 }
 
+static void do_of_entry_multi(void *symval, struct module *mod)
+{
+       char alias[500];
+       int len;
+       char *tmp;
+
+       DEF_FIELD_ADDR(symval, of_device_id, name);
+       DEF_FIELD_ADDR(symval, of_device_id, type);
+       DEF_FIELD_ADDR(symval, of_device_id, compatible);
+
+       len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
+                     (*type)[0] ? *type : "*");
+
+       if (compatible[0])
+               sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
+                       *compatible);
+
+       /* Replace all whitespace with underscores */
+       for (tmp = alias; tmp && *tmp; tmp++)
+               if (isspace(*tmp))
+                       *tmp = '_';
+
+       buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+       strcat(alias, "C");
+       add_wildcard(alias);
+       buf_printf(&mod->dev_table_buf, "MODULE_ALIAS(\"%s\");\n", alias);
+}
+
+static void do_of_table(void *symval, unsigned long size,
+                       struct module *mod)
+{
+       unsigned int i;
+       const unsigned long id_size = SIZE_of_device_id;
+
+       device_id_check(mod->name, "of", size, id_size, symval);
+
+       /* Leave last one: it's the terminator. */
+       size -= id_size;
+
+       for (i = 0; i < size; i += id_size)
+               do_of_entry_multi(symval + i, mod);
+}
+
 /* Looks like: hid:bNvNpN */
 static int do_hid_entry(const char *filename,
                             void *symval, char *alias)
@@ -684,30 +727,6 @@ static int do_pcmcia_entry(const char *filename,
 }
 ADD_TO_DEVTABLE("pcmcia", pcmcia_device_id, do_pcmcia_entry);
 
-static int do_of_entry (const char *filename, void *symval, char *alias)
-{
-       int len;
-       char *tmp;
-       DEF_FIELD_ADDR(symval, of_device_id, name);
-       DEF_FIELD_ADDR(symval, of_device_id, type);
-       DEF_FIELD_ADDR(symval, of_device_id, compatible);
-
-       len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
-                     (*type)[0] ? *type : "*");
-
-       if (compatible[0])
-               sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
-                       *compatible);
-
-       /* Replace all whitespace with underscores */
-       for (tmp = alias; tmp && *tmp; tmp++)
-               if (isspace (*tmp))
-                       *tmp = '_';
-
-       return 1;
-}
-ADD_TO_DEVTABLE("of", of_device_id, do_of_entry);
-
 static int do_vio_entry(const char *filename, void *symval,
                char *alias)
 {
@@ -1348,6 +1367,8 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
        /* First handle the "special" cases */
        if (sym_is(name, namelen, "usb"))
                do_usb_table(symval, sym->st_size, mod);
+       if (sym_is(name, namelen, "of"))
+               do_of_table(symval, sym->st_size, mod);
        else if (sym_is(name, namelen, "pnp"))
                do_pnp_device_entry(symval, sym->st_size, mod);
        else if (sym_is(name, namelen, "pnp_card"))
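
do_of_entry_multi() above emits two MODULE_ALIAS() strings per of_device_id: "of:N<name>T<type>" with "*" substituted for empty fields, optionally followed by "C<compatible>", with whitespace rewritten to underscores, plus a second copy ending in "C" and the wildcard appended by add_wildcard(). A simplified userspace sketch of that alias construction (it omits modpost's DEF_FIELD_ADDR/buf_printf plumbing and the extra wildcard the real helper can insert between the type and compatible fields):

#include <ctype.h>
#include <stdio.h>

/* Build "of:N<name>T<type>[C<compatible>]" roughly the way modpost does:
 * "*" for empty fields, whitespace mapped to '_'. */
static void of_alias(char *alias, size_t sz,
		     const char *name, const char *type, const char *compat)
{
	int len = snprintf(alias, sz, "of:N%sT%s",
			   name[0] ? name : "*", type[0] ? type : "*");

	if (compat[0])
		snprintf(alias + len, sz - len, "C%s", compat);

	for (char *p = alias; *p; p++)
		if (isspace((unsigned char)*p))
			*p = '_';
}

int main(void)
{
	char alias[500];

	of_alias(alias, sizeof(alias), "", "", "fsl,imx6q-gpio");
	printf("MODULE_ALIAS(\"%s\");\n", alias);	/* of:N*T*Cfsl,imx6q-gpio */
	printf("MODULE_ALIAS(\"%sC*\");\n", alias);	/* second, wildcarded form */
	return 0;
}
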
index be09e2cacf828d32c75028ccf04f31e88657da70..3cd0a58672dd1df684e12d0a4331b56fd4fa12fb 100644 (file)
@@ -884,10 +884,10 @@ static char *func_tokens[] = {
        "BPRM_CHECK",
        "MODULE_CHECK",
        "FIRMWARE_CHECK",
+       "POST_SETATTR",
        "KEXEC_KERNEL_CHECK",
        "KEXEC_INITRAMFS_CHECK",
-       "POLICY_CHECK",
-       "POST_SETATTR"
+       "POLICY_CHECK"
 };
 
 void *ima_policy_start(struct seq_file *m, loff_t *pos)
index 64e0d1d81ca5afd66625669079bdce05fa23dc63..9739fce9e032c3a2126a75b1148c21c5e3502bdd 100644 (file)
@@ -139,14 +139,6 @@ static int reconfig_codec(struct hda_codec *codec)
                goto error;
        }
        err = snd_hda_codec_configure(codec);
-       if (err < 0)
-               goto error;
-       /* rebuild PCMs */
-       err = snd_hda_codec_build_pcms(codec);
-       if (err < 0)
-               goto error;
-       /* rebuild mixers */
-       err = snd_hda_codec_build_controls(codec);
        if (err < 0)
                goto error;
        err = snd_card_register(codec->card);
index 1483f85999ecd82d1b9215f5a634011819fd368a..a010d704e0e20b066d912349088cfe0a27de0c31 100644 (file)
@@ -3401,6 +3401,9 @@ static int patch_atihdmi(struct hda_codec *codec)
        spec->ops.pin_hbr_setup = atihdmi_pin_hbr_setup;
        spec->ops.setup_stream = atihdmi_setup_stream;
 
+       spec->chmap.ops.pin_get_slot_channel = atihdmi_pin_get_slot_channel;
+       spec->chmap.ops.pin_set_slot_channel = atihdmi_pin_set_slot_channel;
+
        if (!has_amd_full_remap_support(codec)) {
                /* override to ATI/AMD-specific versions with pairwise mapping */
                spec->chmap.ops.chmap_cea_alloc_validate_get_type =
@@ -3408,10 +3411,6 @@ static int patch_atihdmi(struct hda_codec *codec)
                spec->chmap.ops.cea_alloc_to_tlv_chmap =
                                atihdmi_paired_cea_alloc_to_tlv_chmap;
                spec->chmap.ops.chmap_validate = atihdmi_paired_chmap_validate;
-               spec->chmap.ops.pin_get_slot_channel =
-                               atihdmi_pin_get_slot_channel;
-               spec->chmap.ops.pin_set_slot_channel =
-                               atihdmi_pin_set_slot_channel;
        }
 
        /* ATI/AMD converters do not advertise all of their capabilities */
index ac4490a968638ff7eed3b4007ccdcda8a7b81cea..4918ffa5ba6829e102519b3a7af700b049d93ae2 100644 (file)
@@ -6426,6 +6426,7 @@ enum {
        ALC668_FIXUP_DELL_DISABLE_AAMIX,
        ALC668_FIXUP_DELL_XPS13,
        ALC662_FIXUP_ASUS_Nx50,
+       ALC668_FIXUP_ASUS_Nx51,
 };
 
 static const struct hda_fixup alc662_fixups[] = {
@@ -6672,6 +6673,15 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC662_FIXUP_BASS_1A
        },
+       [ALC668_FIXUP_ASUS_Nx51] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       {0x1a, 0x90170151}, /* bass speaker */
+                       {}
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_BASS_CHMAP,
+       },
 };
 
 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6694,11 +6704,14 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
+       SND_PCI_QUIRK(0x1043, 0x1080, "Asus UX501VW", ALC668_FIXUP_HEADSET_MODE),
        SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_ASUS_Nx50),
        SND_PCI_QUIRK(0x1043, 0x13df, "Asus N550JX", ALC662_FIXUP_BASS_1A),
        SND_PCI_QUIRK(0x1043, 0x129d, "Asus N750", ALC662_FIXUP_ASUS_Nx50),
        SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x15a7, "ASUS UX51VZH", ALC662_FIXUP_BASS_16),
+       SND_PCI_QUIRK(0x1043, 0x177d, "ASUS N551", ALC668_FIXUP_ASUS_Nx51),
+       SND_PCI_QUIRK(0x1043, 0x17bd, "ASUS N751", ALC668_FIXUP_ASUS_Nx51),
        SND_PCI_QUIRK(0x1043, 0x1b73, "ASUS N55SF", ALC662_FIXUP_BASS_16),
        SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
        SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
index 0adfd9537cf766bbe130c71c4cc029091b2d4118..6adde457b602e08aedd1806e8b79863d65e006cc 100644 (file)
@@ -1137,8 +1137,11 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
        case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+       case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
        case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+       case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
        case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
+       case USB_ID(0x1de7, 0x0114): /* Phoenix Audio MT202pcs */
        case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
                return true;
        }
index 6b7707270aa3b19791c8b6248f90ecddeabc1fdd..9f878619077aeb9ecbb50f87b381706d083def37 100644 (file)
@@ -30,6 +30,7 @@ endef
 FEATURE_TESTS_BASIC :=                 \
        backtrace                       \
        dwarf                           \
+       dwarf_getlocations              \
        fortify-source                  \
        sync-compare-and-swap           \
        glibc                           \
@@ -78,6 +79,7 @@ endif
 
 FEATURE_DISPLAY ?=                     \
        dwarf                           \
+       dwarf_getlocations              \
        glibc                           \
        gtk2                            \
        libaudit                        \
index c5f4c417428d7099fbe4f487a179b0663f478611..4ae94dbfdab98d5181e18d9d8864a24f942bdc53 100644 (file)
@@ -3,6 +3,7 @@ FILES=                                  \
        test-backtrace.bin              \
        test-bionic.bin                 \
        test-dwarf.bin                  \
+       test-dwarf_getlocations.bin     \
        test-fortify-source.bin         \
        test-sync-compare-and-swap.bin  \
        test-glibc.bin                  \
@@ -82,6 +83,9 @@ endif
 $(OUTPUT)test-dwarf.bin:
        $(BUILD) $(DWARFLIBS)
 
+$(OUTPUT)test-dwarf_getlocations.bin:
+       $(BUILD) $(DWARFLIBS)
+
 $(OUTPUT)test-libelf-mmap.bin:
        $(BUILD) -lelf
 
index e499a36c1e4a9e21e9c355309b53a7dc5901664a..a282e8cb84f308da358983ebccbf80c612e7d061 100644 (file)
 # include "test-dwarf.c"
 #undef main
 
+#define main main_test_dwarf_getlocations
+# include "test-dwarf_getlocations.c"
+#undef main
+
 #define main main_test_libelf_getphdrnum
 # include "test-libelf-getphdrnum.c"
 #undef main
@@ -143,6 +147,7 @@ int main(int argc, char *argv[])
        main_test_libelf_mmap();
        main_test_glibc();
        main_test_dwarf();
+       main_test_dwarf_getlocations();
        main_test_libelf_getphdrnum();
        main_test_libunwind();
        main_test_libaudit();
diff --git a/tools/build/feature/test-dwarf_getlocations.c b/tools/build/feature/test-dwarf_getlocations.c
new file mode 100644 (file)
index 0000000..7016269
--- /dev/null
@@ -0,0 +1,12 @@
+#include <stdlib.h>
+#include <elfutils/libdw.h>
+
+int main(void)
+{
+       Dwarf_Addr base, start, end;
+       Dwarf_Attribute attr;
+       Dwarf_Op *op;
+	size_t nops;
+	ptrdiff_t offset = 0;
+	return (int)dwarf_getlocations(&attr, offset, &base, &start, &end, &op, &nops);

+}
index 0144b3d1bb77ac63441c77e1bb24e3405cfe3d8f..88cccea3ca9910314bf8024545c76620051fa263 100644 (file)
@@ -1164,11 +1164,11 @@ process_filter(struct event_format *event, struct filter_arg **parg,
                current_op = current_exp;
 
        ret = collapse_tree(current_op, parg, error_str);
+       /* collapse_tree() may free current_op, and updates parg accordingly */
+       current_op = NULL;
        if (ret < 0)
                goto fail;
 
-       *parg = current_op;
-
        free(token);
        return 0;
 
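
The fix above addresses a use-after-free: collapse_tree() either publishes current_op through *parg or frees it, so process_filter() must not dereference or re-store current_op afterwards; nulling the local pointer makes the hand-off explicit. A minimal sketch of the same ownership-transfer pattern (the consume() helper below is hypothetical, not part of libtraceevent):

#include <stdio.h>
#include <stdlib.h>

/* Either publishes node through *out or frees it; caller loses ownership. */
static int consume(int *node, int **out)
{
	if (*node >= 0) {
		*out = node;
		return 0;
	}
	free(node);
	return -1;
}

int main(void)
{
	int *result = NULL;
	int *node = malloc(sizeof(*node));

	if (!node)
		return 1;
	*node = 42;

	int ret = consume(node, &result);
	node = NULL;		/* consume() owns it now; never touch it again */

	if (ret == 0)
		printf("result=%d\n", *result);
	free(result);		/* result aliases the published node, if any */
	return 0;
}
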
index 5b32413409459ce165e2f56990dbc0f52aa8bd31..544b05a53b7057bbe9092e31f600065d47e3b24e 100644 (file)
@@ -98,6 +98,9 @@ static char *get_klog_buff(unsigned int *klen)
        char *buff;
 
        len = klogctl(CMD_ACTION_SIZE_BUFFER, NULL, 0);
+       if (len < 0)
+               return NULL;
+
        buff = malloc(len);
        if (!buff)
                return NULL;
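
get_klog_buff() now rejects a negative klogctl() size before handing it to malloc(), where it would otherwise become a huge unsigned allocation request. A hedged userspace sketch of the same check, using the documented syslog(2) actions 10 (SIZE_BUFFER) and 3 (READ_ALL); reading the log may require root:

#include <stdio.h>
#include <stdlib.h>
#include <sys/klog.h>

int main(void)
{
	int len = klogctl(10 /* SIZE_BUFFER */, NULL, 0);

	if (len < 0)		/* e.g. no permission: don't malloc(-1) */
		return 1;

	char *buf = malloc(len);
	if (!buf)
		return 1;

	int n = klogctl(3 /* READ_ALL */, buf, len);
	if (n > 0)
		fwrite(buf, 1, n, stdout);
	free(buf);
	return 0;
}
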
index 9223c164e545d869267b9b7a17d409b774dd7904..1f86ee8fb831c99e8d22ead64ca33ec11a816985 100644 (file)
@@ -63,6 +63,8 @@ struct pt_regs_offset {
 # define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
 #endif
 
+/* TODO: switching by dwarf address size */
+#ifndef __x86_64__
 static const struct pt_regs_offset x86_32_regoffset_table[] = {
        REG_OFFSET_NAME_32("%ax",       eax),
        REG_OFFSET_NAME_32("%cx",       ecx),
@@ -75,6 +77,8 @@ static const struct pt_regs_offset x86_32_regoffset_table[] = {
        REG_OFFSET_END,
 };
 
+#define regoffset_table x86_32_regoffset_table
+#else
 static const struct pt_regs_offset x86_64_regoffset_table[] = {
        REG_OFFSET_NAME_64("%ax",       rax),
        REG_OFFSET_NAME_64("%dx",       rdx),
@@ -95,11 +99,7 @@ static const struct pt_regs_offset x86_64_regoffset_table[] = {
        REG_OFFSET_END,
 };
 
-/* TODO: switching by dwarf address size */
-#ifdef __x86_64__
 #define regoffset_table x86_64_regoffset_table
-#else
-#define regoffset_table x86_32_regoffset_table
 #endif
 
 /* Minus 1 for the ending REG_OFFSET_END */
index 3770c3dffe5e141e6c3af2736b9a3e0d41c81cc4..52826696c8528d6f17773b9d4359ea7053cc7e0e 100644 (file)
@@ -1415,21 +1415,19 @@ static int is_directory(const char *base_path, const struct dirent *dent)
        return S_ISDIR(st.st_mode);
 }
 
-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\
-       while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) &&     \
-              lang_next)                                               \
-               if ((lang_dirent.d_type == DT_DIR ||                    \
-                    (lang_dirent.d_type == DT_UNKNOWN &&               \
-                     is_directory(scripts_path, &lang_dirent))) &&     \
-                   (strcmp(lang_dirent.d_name, ".")) &&                \
-                   (strcmp(lang_dirent.d_name, "..")))
-
-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\
-       while (!readdir_r(lang_dir, &script_dirent, &script_next) &&    \
-              script_next)                                             \
-               if (script_dirent.d_type != DT_DIR &&                   \
-                   (script_dirent.d_type != DT_UNKNOWN ||              \
-                    !is_directory(lang_path, &script_dirent)))
+#define for_each_lang(scripts_path, scripts_dir, lang_dirent)          \
+       while ((lang_dirent = readdir(scripts_dir)) != NULL)            \
+               if ((lang_dirent->d_type == DT_DIR ||                   \
+                    (lang_dirent->d_type == DT_UNKNOWN &&              \
+                     is_directory(scripts_path, lang_dirent))) &&      \
+                   (strcmp(lang_dirent->d_name, ".")) &&               \
+                   (strcmp(lang_dirent->d_name, "..")))
+
+#define for_each_script(lang_path, lang_dir, script_dirent)            \
+       while ((script_dirent = readdir(lang_dir)) != NULL)             \
+               if (script_dirent->d_type != DT_DIR &&                  \
+                   (script_dirent->d_type != DT_UNKNOWN ||             \
+                    !is_directory(lang_path, script_dirent)))
 
 
 #define RECORD_SUFFIX                  "-record"
@@ -1575,7 +1573,7 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
                                  const char *s __maybe_unused,
                                  int unset __maybe_unused)
 {
-       struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+       struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN];
        DIR *scripts_dir, *lang_dir;
        char script_path[MAXPATHLEN];
@@ -1590,19 +1588,19 @@ static int list_available_scripts(const struct option *opt __maybe_unused,
        if (!scripts_dir)
                return -1;
 
-       for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+       for_each_lang(scripts_path, scripts_dir, lang_dirent) {
                snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-                        lang_dirent.d_name);
+                        lang_dirent->d_name);
                lang_dir = opendir(lang_path);
                if (!lang_dir)
                        continue;
 
-               for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-                       script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+               for_each_script(lang_path, lang_dir, script_dirent) {
+                       script_root = get_script_root(script_dirent, REPORT_SUFFIX);
                        if (script_root) {
                                desc = script_desc__findnew(script_root);
                                snprintf(script_path, MAXPATHLEN, "%s/%s",
-                                        lang_path, script_dirent.d_name);
+                                        lang_path, script_dirent->d_name);
                                read_script_info(desc, script_path);
                                free(script_root);
                        }
@@ -1690,7 +1688,7 @@ static int check_ev_match(char *dir_name, char *scriptname,
  */
 int find_scripts(char **scripts_array, char **scripts_path_array)
 {
-       struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+       struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
        DIR *scripts_dir, *lang_dir;
        struct perf_session *session;
@@ -1713,9 +1711,9 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
                return -1;
        }
 
-       for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+       for_each_lang(scripts_path, scripts_dir, lang_dirent) {
                snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
-                        lang_dirent.d_name);
+                        lang_dirent->d_name);
 #ifdef NO_LIBPERL
                if (strstr(lang_path, "perl"))
                        continue;
@@ -1729,16 +1727,16 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
                if (!lang_dir)
                        continue;
 
-               for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+               for_each_script(lang_path, lang_dir, script_dirent) {
                        /* Skip those real time scripts: xxxtop.p[yl] */
-                       if (strstr(script_dirent.d_name, "top."))
+                       if (strstr(script_dirent->d_name, "top."))
                                continue;
                        sprintf(scripts_path_array[i], "%s/%s", lang_path,
-                               script_dirent.d_name);
-                       temp = strchr(script_dirent.d_name, '.');
+                               script_dirent->d_name);
+                       temp = strchr(script_dirent->d_name, '.');
                        snprintf(scripts_array[i],
-                               (temp - script_dirent.d_name) + 1,
-                               "%s", script_dirent.d_name);
+                               (temp - script_dirent->d_name) + 1,
+                               "%s", script_dirent->d_name);
 
                        if (check_ev_match(lang_path,
                                        scripts_array[i], session))
@@ -1756,7 +1754,7 @@ int find_scripts(char **scripts_array, char **scripts_path_array)
 
 static char *get_script_path(const char *script_root, const char *suffix)
 {
-       struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+       struct dirent *script_dirent, *lang_dirent;
        char scripts_path[MAXPATHLEN];
        char script_path[MAXPATHLEN];
        DIR *scripts_dir, *lang_dir;
@@ -1769,21 +1767,21 @@ static char *get_script_path(const char *script_root, const char *suffix)
        if (!scripts_dir)
                return NULL;
 
-       for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+       for_each_lang(scripts_path, scripts_dir, lang_dirent) {
                snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
-                        lang_dirent.d_name);
+                        lang_dirent->d_name);
                lang_dir = opendir(lang_path);
                if (!lang_dir)
                        continue;
 
-               for_each_script(lang_path, lang_dir, script_dirent, script_next) {
-                       __script_root = get_script_root(&script_dirent, suffix);
+               for_each_script(lang_path, lang_dir, script_dirent) {
+                       __script_root = get_script_root(script_dirent, suffix);
                        if (__script_root && !strcmp(script_root, __script_root)) {
                                free(__script_root);
                                closedir(lang_dir);
                                closedir(scripts_dir);
                                snprintf(script_path, MAXPATHLEN, "%s/%s",
-                                        lang_path, script_dirent.d_name);
+                                        lang_path, script_dirent->d_name);
                                return strdup(script_path);
                        }
                        free(__script_root);
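
This file, like util/event.c, util/parse-events.c and util/thread_map.c further below, switches from readdir_r(), which glibc has deprecated, to plain readdir(): each entry is fully consumed before the next call, so no caller-provided dirent buffer is needed. A minimal sketch of the resulting loop, skipping "." and ".." the way the for_each_lang()/for_each_script() macros do:

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	DIR *dir = opendir(".");
	struct dirent *ent;

	if (!dir)
		return 1;

	/* readdir() reuses internal storage per DIR *, but each iteration
	 * finishes with ent before calling it again. */
	while ((ent = readdir(dir)) != NULL) {
		if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, ".."))
			continue;
		printf("%s\n", ent->d_name);
	}

	closedir(dir);
	return 0;
}
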
index 1f19f2f999c841b9da140e10bcaf5e6e0f41ee6b..307e8a1a003c5ebfd7eefe6a8770a868cd3b55e9 100644 (file)
@@ -528,6 +528,7 @@ static int __run_perf_stat(int argc, const char **argv)
                perf_evlist__set_leader(evsel_list);
 
        evlist__for_each(evsel_list, counter) {
+try_again:
                if (create_perf_stat_counter(counter) < 0) {
                        /*
                         * PPC returns ENXIO for HW counters until 2.6.37
@@ -544,7 +545,11 @@ static int __run_perf_stat(int argc, const char **argv)
                                if ((counter->leader != counter) ||
                                    !(counter->leader->nr_members > 1))
                                        continue;
-                       }
+                       } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
+				if (verbose)
+					ui__warning("%s\n", msg);
+				goto try_again;
+			}
 
                        perf_evsel__open_strerror(counter, &target,
                                                  errno, msg, sizeof(msg));
index f7d7f5a1cad538e44be9400b520ca32315eed999..6f8f6430f2bf6be5ef09565cabedf33ed8963a7b 100644 (file)
@@ -268,6 +268,12 @@ else
     ifneq ($(feature-dwarf), 1)
       msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. Please install new elfutils-devel/libdw-dev);
       NO_DWARF := 1
+    else
+      ifneq ($(feature-dwarf_getlocations), 1)
+        msg := $(warning Old libdw.h, finding variables at given 'perf probe' point will not work, install elfutils-devel/libdw-dev >= 0.157);
+      else
+        CFLAGS += -DHAVE_DWARF_GETLOCATIONS
+      endif # dwarf_getlocations
     endif # Dwarf support
   endif # libelf support
 endif # NO_LIBELF
index 577e600c8eb15a66cb50580b0c6361060147894a..aea189b41cc8c43f8ce325c4fde2f8ae27507df3 100644 (file)
@@ -959,6 +959,7 @@ int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
        return 0;
 }
 
+#ifdef HAVE_DWARF_GETLOCATIONS
 /**
  * die_get_var_innermost_scope - Get innermost scope range of given variable DIE
  * @sp_die: a subprogram DIE
@@ -1080,3 +1081,11 @@ int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
 
        return ret;
 }
+#else
+int die_get_var_range(Dwarf_Die *sp_die __maybe_unused,
+                     Dwarf_Die *vr_die __maybe_unused,
+                     struct strbuf *buf __maybe_unused)
+{
+       return -ENOTSUP;
+}
+#endif
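
When the feature test added earlier does not find dwarf_getlocations() (elfutils older than 0.157), HAVE_DWARF_GETLOCATIONS stays undefined and the stub above makes die_get_var_range() return -ENOTSUP, so callers degrade gracefully instead of failing to build. A generic sketch of that feature-gated stub pattern (HAVE_FANCY_API and fancy_lookup() are made-up names, not perf symbols):

#include <errno.h>
#include <stdio.h>

/* In a real build, -DHAVE_FANCY_API would come from feature detection in
 * the build system; here it is just an ordinary macro. */
#ifdef HAVE_FANCY_API
static int fancy_lookup(int key)
{
	return key * 2;			/* stand-in for the real library call */
}
#else
static int fancy_lookup(int key __attribute__((unused)))
{
	return -ENOTSUP;		/* same signature, graceful failure */
}
#endif

int main(void)
{
	int ret = fancy_lookup(21);

	if (ret == -ENOTSUP)
		fprintf(stderr, "feature not available, skipping\n");
	else
		printf("result=%d\n", ret);
	return 0;
}
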
index dad55d04ffdd5074c212fac7dbcd306444f008e6..edcf4ed4e99c8891e8990b71389641b5e14ae9ef 100644 (file)
@@ -433,7 +433,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
 {
        char filename[PATH_MAX];
        DIR *tasks;
-       struct dirent dirent, *next;
+       struct dirent *dirent;
        pid_t tgid, ppid;
        int rc = 0;
 
@@ -462,11 +462,11 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                return 0;
        }
 
-       while (!readdir_r(tasks, &dirent, &next) && next) {
+       while ((dirent = readdir(tasks)) != NULL) {
                char *end;
                pid_t _pid;
 
-               _pid = strtol(dirent.d_name, &end, 10);
+               _pid = strtol(dirent->d_name, &end, 10);
                if (*end)
                        continue;
 
@@ -575,7 +575,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
 {
        DIR *proc;
        char proc_path[PATH_MAX];
-       struct dirent dirent, *next;
+       struct dirent *dirent;
        union perf_event *comm_event, *mmap_event, *fork_event;
        int err = -1;
 
@@ -600,9 +600,9 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
        if (proc == NULL)
                goto out_free_fork;
 
-       while (!readdir_r(proc, &dirent, &next) && next) {
+       while ((dirent = readdir(proc)) != NULL) {
                char *end;
-               pid_t pid = strtol(dirent.d_name, &end, 10);
+               pid_t pid = strtol(dirent->d_name, &end, 10);
 
                if (*end) /* only interested in proper numerical dirents */
                        continue;
index 738ce226002b8a0e88093fefaf74e9fe6093a13a..645dc18288367733567b760f136b81657a5937ef 100644 (file)
@@ -2345,6 +2345,8 @@ out:
 bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                          char *msg, size_t msgsize)
 {
+       int paranoid;
+
        if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
            evsel->attr.type   == PERF_TYPE_HARDWARE &&
            evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
@@ -2363,6 +2365,22 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
                evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;
 
                zfree(&evsel->name);
+               return true;
+       } else if (err == EACCES && !evsel->attr.exclude_kernel &&
+                  (paranoid = perf_event_paranoid()) > 1) {
+               const char *name = perf_evsel__name(evsel);
+               char *new_name;
+
+               if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
+                       return false;
+
+               if (evsel->name)
+                       free(evsel->name);
+               evsel->name = new_name;
+               scnprintf(msg, msgsize,
+"kernel.perf_event_paranoid=%d, trying to fall back to excluding kernel samples", paranoid);
+               evsel->attr.exclude_kernel = 1;
+
                return true;
        }
 
@@ -2382,12 +2400,13 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                 "Consider tweaking /proc/sys/kernel/perf_event_paranoid,\n"
                 "which controls use of the performance events system by\n"
                 "unprivileged users (without CAP_SYS_ADMIN).\n\n"
-                "The default value is 1:\n\n"
+                "The current value is %d:\n\n"
                 "  -1: Allow use of (almost) all events by all users\n"
                 ">= 0: Disallow raw tracepoint access by users without CAP_IOC_LOCK\n"
                 ">= 1: Disallow CPU event access by users without CAP_SYS_ADMIN\n"
                 ">= 2: Disallow kernel profiling by users without CAP_SYS_ADMIN",
-                                target->system_wide ? "system-wide " : "");
+                                target->system_wide ? "system-wide " : "",
+                                perf_event_paranoid());
        case ENOENT:
                return scnprintf(msg, size, "The %s event is not supported.",
                                 perf_evsel__name(evsel));
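
The new EACCES branch is what the try_again loop in builtin-stat.c retries with: when perf_event_paranoid is above 1 and the event does not already exclude the kernel, the event name gains a ":u" modifier (just "u" if it already contains ':') and exclude_kernel is set before reopening. A small sketch of only the name rewriting (add_user_modifier() is a hypothetical helper, not the perf API):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* "cycles" -> "cycles:u", "cycles:pp" -> "cycles:ppu"; caller frees result. */
static char *add_user_modifier(const char *name)
{
	char *new_name;

	if (asprintf(&new_name, "%s%su", name, strchr(name, ':') ? "" : ":") < 0)
		return NULL;
	return new_name;
}

int main(void)
{
	const char *events[] = { "cycles", "cycles:pp" };

	for (size_t i = 0; i < 2; i++) {
		char *n = add_user_modifier(events[i]);

		if (!n)
			return 1;
		printf("%s -> %s\n", events[i], n);
		free(n);
	}
	return 0;
}
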
index 4c19d5e79d8c4d626eb3fa91486cc1d83447aeeb..bcbc983d4b12215dc1045fb0542597f08db38b45 100644 (file)
@@ -138,11 +138,11 @@ struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
 #define PERF_EVENT_TYPE(config)                __PERF_EVENT_FIELD(config, TYPE)
 #define PERF_EVENT_ID(config)          __PERF_EVENT_FIELD(config, EVENT)
 
-#define for_each_subsystem(sys_dir, sys_dirent, sys_next)             \
-       while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next)        \
-       if (sys_dirent.d_type == DT_DIR &&                                     \
-          (strcmp(sys_dirent.d_name, ".")) &&                                 \
-          (strcmp(sys_dirent.d_name, "..")))
+#define for_each_subsystem(sys_dir, sys_dirent)                        \
+       while ((sys_dirent = readdir(sys_dir)) != NULL)         \
+               if (sys_dirent->d_type == DT_DIR &&             \
+                   (strcmp(sys_dirent->d_name, ".")) &&        \
+                   (strcmp(sys_dirent->d_name, "..")))
 
 static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
 {
@@ -159,12 +159,12 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
        return 0;
 }
 
-#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next)             \
-       while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next)        \
-       if (evt_dirent.d_type == DT_DIR &&                                     \
-          (strcmp(evt_dirent.d_name, ".")) &&                                 \
-          (strcmp(evt_dirent.d_name, "..")) &&                                \
-          (!tp_event_has_id(&sys_dirent, &evt_dirent)))
+#define for_each_event(sys_dirent, evt_dir, evt_dirent)                \
+       while ((evt_dirent = readdir(evt_dir)) != NULL)         \
+               if (evt_dirent->d_type == DT_DIR &&             \
+                   (strcmp(evt_dirent->d_name, ".")) &&        \
+                   (strcmp(evt_dirent->d_name, "..")) &&       \
+                   (!tp_event_has_id(sys_dirent, evt_dirent)))
 
 #define MAX_EVENT_LENGTH 512
 
@@ -173,7 +173,7 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
 {
        struct tracepoint_path *path = NULL;
        DIR *sys_dir, *evt_dir;
-       struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+       struct dirent *sys_dirent, *evt_dirent;
        char id_buf[24];
        int fd;
        u64 id;
@@ -184,18 +184,18 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
        if (!sys_dir)
                return NULL;
 
-       for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+       for_each_subsystem(sys_dir, sys_dirent) {
 
                snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent.d_name);
+                        sys_dirent->d_name);
                evt_dir = opendir(dir_path);
                if (!evt_dir)
                        continue;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+               for_each_event(sys_dirent, evt_dir, evt_dirent) {
 
                        snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
-                                evt_dirent.d_name);
+                                evt_dirent->d_name);
                        fd = open(evt_path, O_RDONLY);
                        if (fd < 0)
                                continue;
@@ -220,9 +220,9 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
                                        free(path);
                                        return NULL;
                                }
-                               strncpy(path->system, sys_dirent.d_name,
+                               strncpy(path->system, sys_dirent->d_name,
                                        MAX_EVENT_LENGTH);
-                               strncpy(path->name, evt_dirent.d_name,
+                               strncpy(path->name, evt_dirent->d_name,
                                        MAX_EVENT_LENGTH);
                                return path;
                        }
@@ -1812,7 +1812,7 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob,
                             bool name_only)
 {
        DIR *sys_dir, *evt_dir;
-       struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+       struct dirent *sys_dirent, *evt_dirent;
        char evt_path[MAXPATHLEN];
        char dir_path[MAXPATHLEN];
        char **evt_list = NULL;
@@ -1830,20 +1830,20 @@ restart:
                        goto out_close_sys_dir;
        }
 
-       for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+       for_each_subsystem(sys_dir, sys_dirent) {
                if (subsys_glob != NULL &&
-                   !strglobmatch(sys_dirent.d_name, subsys_glob))
+                   !strglobmatch(sys_dirent->d_name, subsys_glob))
                        continue;
 
                snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent.d_name);
+                        sys_dirent->d_name);
                evt_dir = opendir(dir_path);
                if (!evt_dir)
                        continue;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+               for_each_event(sys_dirent, evt_dir, evt_dirent) {
                        if (event_glob != NULL &&
-                           !strglobmatch(evt_dirent.d_name, event_glob))
+                           !strglobmatch(evt_dirent->d_name, event_glob))
                                continue;
 
                        if (!evt_num_known) {
@@ -1852,7 +1852,7 @@ restart:
                        }
 
                        snprintf(evt_path, MAXPATHLEN, "%s:%s",
-                                sys_dirent.d_name, evt_dirent.d_name);
+                                sys_dirent->d_name, evt_dirent->d_name);
 
                        evt_list[evt_i] = strdup(evt_path);
                        if (evt_list[evt_i] == NULL)
@@ -1905,7 +1905,7 @@ out_close_sys_dir:
 int is_valid_tracepoint(const char *event_string)
 {
        DIR *sys_dir, *evt_dir;
-       struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+       struct dirent *sys_dirent, *evt_dirent;
        char evt_path[MAXPATHLEN];
        char dir_path[MAXPATHLEN];
 
@@ -1913,17 +1913,17 @@ int is_valid_tracepoint(const char *event_string)
        if (!sys_dir)
                return 0;
 
-       for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+       for_each_subsystem(sys_dir, sys_dirent) {
 
                snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
-                        sys_dirent.d_name);
+                        sys_dirent->d_name);
                evt_dir = opendir(dir_path);
                if (!evt_dir)
                        continue;
 
-               for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+               for_each_event(sys_dirent, evt_dir, evt_dirent) {
                        snprintf(evt_path, MAXPATHLEN, "%s:%s",
-                                sys_dirent.d_name, evt_dirent.d_name);
+                                sys_dirent->d_name, evt_dirent->d_name);
                        if (!strcmp(evt_path, event_string)) {
                                closedir(evt_dir);
                                closedir(sys_dir);
index 47966a1618c7310108a42de59327930e6acdb184..f5ba111cd9fb2a00757a36af8d706a3f4380c92e 100644 (file)
@@ -2445,6 +2445,9 @@ static char *prefix_if_not_in(const char *pre, char *str)
 
 static char *setup_overhead(char *keys)
 {
+       if (sort__mode == SORT_MODE__DIFF)
+               return keys;
+
        keys = prefix_if_not_in("overhead", keys);
 
        if (symbol_conf.cumulate_callchain)
index 08afc69099538f66172968dc3827fd9b7b40d5c2..267112b4e3dbe9d291cb58b75d50428b177599a8 100644 (file)
@@ -94,7 +94,7 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
        DIR *proc;
        int max_threads = 32, items, i;
        char path[256];
-       struct dirent dirent, *next, **namelist = NULL;
+       struct dirent *dirent, **namelist = NULL;
        struct thread_map *threads = thread_map__alloc(max_threads);
 
        if (threads == NULL)
@@ -107,16 +107,16 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
        threads->nr = 0;
        atomic_set(&threads->refcnt, 1);
 
-       while (!readdir_r(proc, &dirent, &next) && next) {
+       while ((dirent = readdir(proc)) != NULL) {
                char *end;
                bool grow = false;
                struct stat st;
-               pid_t pid = strtol(dirent.d_name, &end, 10);
+               pid_t pid = strtol(dirent->d_name, &end, 10);
 
                if (*end) /* only interested in proper numerical dirents */
                        continue;
 
-               snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);
+               snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
 
                if (stat(path, &st) != 0)
                        continue;
index b04afc3295dfbcb9bc0d4d66b40fa70d32fa016a..ff9e5f20a5a73cedc5f03992066176279c38426e 100644 (file)
@@ -19,6 +19,7 @@ TARGETS += powerpc
 TARGETS += pstore
 TARGETS += ptrace
 TARGETS += seccomp
+TARGETS += sigaltstack
 TARGETS += size
 TARGETS += static_keys
 TARGETS += sysctl
diff --git a/tools/testing/selftests/rcutorture/bin/jitter.sh b/tools/testing/selftests/rcutorture/bin/jitter.sh
new file mode 100755 (executable)
index 0000000..3633828
--- /dev/null
@@ -0,0 +1,90 @@
+#!/bin/bash
+#
+# Alternate sleeping and spinning on randomly selected CPUs.  The purpose
+# of this script is to inflict random OS jitter on a concurrently running
+# test.
+#
+# Usage: jitter.sh me duration [ sleepmax [ spinmax ] ]
+#
+# me: Random-number-generator seed salt.
+# duration: Time to run in seconds.
+# sleepmax: Maximum microseconds to sleep, defaults to one second.
+# spinmax: Maximum microseconds to spin, defaults to one millisecond.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+me=$(($1 * 1000))
+duration=$2
+sleepmax=${3-1000000}
+spinmax=${4-1000}
+
+n=1
+
+starttime=`awk 'BEGIN { print systime(); }' < /dev/null`
+
+while :
+do
+       # Check for done.
+       t=`awk -v s=$starttime 'BEGIN { print systime() - s; }' < /dev/null`
+       if test "$t" -gt "$duration"
+       then
+               exit 0;
+       fi
+
+       # Set affinity to randomly selected CPU
+       cpus=`ls /sys/devices/system/cpu/*/online |
+               sed -e 's,/[^/]*$,,' -e 's/^[^0-9]*//' |
+               grep -v '^0*$'`
+       cpumask=`awk -v cpus="$cpus" -v me=$me -v n=$n 'BEGIN {
+               srand(n + me + systime());
+               ncpus = split(cpus, ca);
+               curcpu = ca[int(rand() * ncpus + 1)];
+               mask = lshift(1, curcpu);
+               if (mask + 0 <= 0)
+                       mask = 1;
+               printf("%#x\n", mask);
+       }' < /dev/null`
+       n=$(($n+1))
+       if ! taskset -p $cpumask $$ > /dev/null 2>&1
+       then
+               echo taskset failure: '"taskset -p ' $cpumask $$ '"'
+               exit 1
+       fi
+
+       # Sleep a random duration
+       sleeptime=`awk -v me=$me -v n=$n -v sleepmax=$sleepmax 'BEGIN {
+               srand(n + me + systime());
+               printf("%06d", int(rand() * sleepmax));
+       }' < /dev/null`
+       n=$(($n+1))
+       sleep .$sleeptime
+
+       # Spin a random duration
+       limit=`awk -v me=$me -v n=$n -v spinmax=$spinmax 'BEGIN {
+               srand(n + me + systime());
+               printf("%06d", int(rand() * spinmax));
+       }' < /dev/null`
+       n=$(($n+1))
+	for i in $(seq $limit)
+       do
+               echo > /dev/null
+       done
+done
+
+exit 1
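
jitter.sh above loops until the duration expires, each pass pinning itself to a randomly selected online CPU with taskset, sleeping a random number of microseconds, then spinning for a random interval. A rough C analogue of the same idea using sched_setaffinity(2) and a monotonic-clock spin, offered only as an illustration of the technique, not a replacement for the script:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
	long ncpu = sysconf(_SC_NPROCESSORS_ONLN);
	const long sleepmax = 1000000, spinmax = 1000;	/* microseconds */

	if (ncpu < 1)
		ncpu = 1;
	srand((unsigned)(time(NULL) ^ getpid()));

	for (int iter = 0; iter < 10; iter++) {
		cpu_set_t set;

		/* Pin to a random CPU, like "taskset -p $cpumask $$". */
		CPU_ZERO(&set);
		CPU_SET(rand() % ncpu, &set);
		if (sched_setaffinity(0, sizeof(set), &set))
			perror("sched_setaffinity");

		/* Sleep a random duration. */
		usleep((useconds_t)(rand() % sleepmax));

		/* Spin a random duration. */
		struct timespec start, now;
		long spin_us = rand() % spinmax;

		clock_gettime(CLOCK_MONOTONIC, &start);
		do {
			clock_gettime(CLOCK_MONOTONIC, &now);
		} while ((now.tv_sec - start.tv_sec) * 1000000L +
			 (now.tv_nsec - start.tv_nsec) / 1000L < spin_us);
	}
	return 0;
}
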
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf-ftrace.sh
new file mode 100755 (executable)
index 0000000..f79b0e9
--- /dev/null
@@ -0,0 +1,121 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcuperf performance measurements,
+# looking for ftrace data.  Exits with 0 if data was found, analyzed, and
+# printed.  Intended to be invoked from kvm-recheck-rcuperf.sh after
+# argument checking.
+#
+# Usage: kvm-recheck-rcuperf-ftrace.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+. tools/testing/selftests/rcutorture/bin/functions.sh
+
+if test "`grep -c 'rcu_exp_grace_period.*start' < $i/console.log`" -lt 100
+then
+       exit 10
+fi
+
+sed -e 's/^\[[^]]*]//' < $i/console.log |
+grep 'us : rcu_exp_grace_period' |
+sed -e 's/us : / : /' |
+tr -d '\015' |
+awk '
+$8 == "start" {
+       if (starttask != "")
+               nlost++;
+       starttask = $1;
+       starttime = $3;
+       startseq = $7;
+}
+
+$8 == "end" {
+       if (starttask == $1 && startseq == $7) {
+               curgpdur = $3 - starttime;
+               gptimes[++n] = curgpdur;
+               gptaskcnt[starttask]++;
+               sum += curgpdur;
+               if (curgpdur > 1000)
+                       print "Long GP " starttime "us to " $3 "us (" curgpdur "us)";
+               starttask = "";
+       } else {
+               # Lost a message or some such, reset.
+               starttask = "";
+               nlost++;
+       }
+}
+
+$8 == "done" {
+       piggybackcnt[$1]++;
+}
+
+END {
+       newNR = asort(gptimes);
+       if (newNR <= 0) {
+               print "No ftrace records found???"
+               exit 10;
+       }
+       pct50 = int(newNR * 50 / 100);
+       if (pct50 < 1)
+               pct50 = 1;
+       pct90 = int(newNR * 90 / 100);
+       if (pct90 < 1)
+               pct90 = 1;
+       pct99 = int(newNR * 99 / 100);
+       if (pct99 < 1)
+               pct99 = 1;
+       div = 10 ** int(log(gptimes[pct90]) / log(10) + .5) / 100;
+       print "Histogram bucket size: " div;
+       last = gptimes[1] - 10;
+       count = 0;
+       for (i = 1; i <= newNR; i++) {
+               current = div * int(gptimes[i] / div);
+               if (last == current) {
+                       count++;
+               } else {
+                       if (count > 0)
+                               print last, count;
+                       count = 1;
+                       last = current;
+               }
+       }
+       if (count > 0)
+               print last, count;
+       print "Distribution of grace periods across tasks:";
+       for (i in gptaskcnt) {
+               print "\t" i, gptaskcnt[i];
+               nbatches += gptaskcnt[i];
+       }
+       ngps = nbatches;
+       print "Distribution of piggybacking across tasks:";
+       for (i in piggybackcnt) {
+               print "\t" i, piggybackcnt[i];
+               ngps += piggybackcnt[i];
+       }
+       print "Average grace-period duration: " sum / newNR " microseconds";
+       print "Minimum grace-period duration: " gptimes[1];
+       print "50th percentile grace-period duration: " gptimes[pct50];
+       print "90th percentile grace-period duration: " gptimes[pct90];
+       print "99th percentile grace-period duration: " gptimes[pct99];
+       print "Maximum grace-period duration: " gptimes[newNR];
+       print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches " Lost: " nlost + 0;
+       print "Computed from ftrace data.";
+}'
+exit 0
diff --git a/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh b/tools/testing/selftests/rcutorture/bin/kvm-recheck-rcuperf.sh
new file mode 100755 (executable)
index 0000000..8f3121a
--- /dev/null
@@ -0,0 +1,96 @@
+#!/bin/bash
+#
+# Analyze a given results directory for rcuperf performance measurements.
+#
+# Usage: kvm-recheck-rcuperf.sh resdir
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2016
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+i="$1"
+if test -d $i
+then
+       :
+else
+       echo Unreadable results directory: $i
+       exit 1
+fi
+PATH=`pwd`/tools/testing/selftests/rcutorture/bin:$PATH; export PATH
+. tools/testing/selftests/rcutorture/bin/functions.sh
+
+if kvm-recheck-rcuperf-ftrace.sh $i
+then
+       # ftrace data was successfully analyzed, call it good!
+       exit 0
+fi
+
+configfile=`echo $i | sed -e 's/^.*\///'`
+
+sed -e 's/^\[[^]]*]//' < $i/console.log |
+awk '
+/-perf: .* gps: .* batches:/ {
+       ngps = $9;
+       nbatches = $11;
+}
+
+/-perf: .*writer-duration/ {
+       gptimes[++n] = $5 / 1000.;
+       sum += $5 / 1000.;
+}
+
+END {
+       newNR = asort(gptimes);
+       if (newNR <= 0) {
+               print "No rcuperf records found???"
+               exit;
+       }
+       pct50 = int(newNR * 50 / 100);
+       if (pct50 < 1)
+               pct50 = 1;
+       pct90 = int(newNR * 90 / 100);
+       if (pct90 < 1)
+               pct90 = 1;
+       pct99 = int(newNR * 99 / 100);
+       if (pct99 < 1)
+               pct99 = 1;
+       div = 10 ** int(log(gptimes[pct90]) / log(10) + .5) / 100;
+       print "Histogram bucket size: " div;
+       last = gptimes[1] - 10;
+       count = 0;
+       for (i = 1; i <= newNR; i++) {
+               current = div * int(gptimes[i] / div);
+               if (last == current) {
+                       count++;
+               } else {
+                       if (count > 0)
+                               print last, count;
+                       count = 1;
+                       last = current;
+               }
+       }
+       if (count > 0)
+               print last, count;
+       print "Average grace-period duration: " sum / newNR " microseconds";
+       print "Minimum grace-period duration: " gptimes[1];
+       print "50th percentile grace-period duration: " gptimes[pct50];
+       print "90th percentile grace-period duration: " gptimes[pct90];
+       print "99th percentile grace-period duration: " gptimes[pct99];
+       print "Maximum grace-period duration: " gptimes[newNR];
+       print "Grace periods: " ngps + 0 " Batches: " nbatches + 0 " Ratio: " ngps / nbatches;
+       print "Computed from rcuperf printk output.";
+}'
index d86bdd6b6cc2df3148adf7bacff4c6a014edc3cc..f659346d335854328fd1af1e662c21179b4b305c 100755 (executable)
@@ -48,7 +48,10 @@ do
                                cat $i/Make.oldconfig.err
                        fi
                        parse-build.sh $i/Make.out $configfile
-                       parse-torture.sh $i/console.log $configfile
+                       if test "$TORTURE_SUITE" != rcuperf
+                       then
+                               parse-torture.sh $i/console.log $configfile
+                       fi
                        parse-console.sh $i/console.log $configfile
                        if test -r $i/Warnings
                        then
index 0f80eefb0bfd5a0cef6796f9f7f0fce60b3797c3..4109f306d855360440ff6fc27037fd2246578789 100755 (executable)
@@ -6,7 +6,7 @@
 # Execute this in the source tree.  Do not run it as a background task
 # because qemu does not seem to like that much.
 #
-# Usage: kvm-test-1-run.sh config builddir resdir minutes qemu-args boot_args
+# Usage: kvm-test-1-run.sh config builddir resdir seconds qemu-args boot_args
 #
 # qemu-args defaults to "-enable-kvm -soundhw pcspk -nographic", along with
 #                      arguments specifying the number of CPUs and other
@@ -91,25 +91,33 @@ fi
 # CONFIG_PCMCIA=n
 # CONFIG_CARDBUS=n
 # CONFIG_YENTA=n
-if kvm-build.sh $config_template $builddir $T
+base_resdir=`echo $resdir | sed -e 's/\.[0-9]\+$//'`
+if test "$base_resdir" != "$resdir" -a -f $base_resdir/bzImage -a -f $base_resdir/vmlinux
 then
+       # Rerunning previous test, so use that test's kernel.
+       QEMU="`identify_qemu $base_resdir/vmlinux`"
+       KERNEL=$base_resdir/bzImage
+       ln -s $base_resdir/Make*.out $resdir  # for kvm-recheck.sh
+       ln -s $base_resdir/.config $resdir  # for kvm-recheck.sh
+elif kvm-build.sh $config_template $builddir $T
+then
+       # Had to build a kernel for this test.
        QEMU="`identify_qemu $builddir/vmlinux`"
        BOOT_IMAGE="`identify_boot_image $QEMU`"
        cp $builddir/Make*.out $resdir
+       cp $builddir/vmlinux $resdir
        cp $builddir/.config $resdir
        if test -n "$BOOT_IMAGE"
        then
                cp $builddir/$BOOT_IMAGE $resdir
+               KERNEL=$resdir/bzImage
        else
                echo No identifiable boot image, not running KVM, see $resdir.
                echo Do the torture scripts know about your architecture?
        fi
        parse-build.sh $resdir/Make.out $title
-       if test -f $builddir.wait
-       then
-               mv $builddir.wait $builddir.ready
-       fi
 else
+       # Build failed.
        cp $builddir/Make*.out $resdir
        cp $builddir/.config $resdir || :
        echo Build failed, not running KVM, see $resdir.
@@ -119,12 +127,15 @@ else
        fi
        exit 1
 fi
+if test -f $builddir.wait
+then
+       mv $builddir.wait $builddir.ready
+fi
 while test -f $builddir.ready
 do
        sleep 1
 done
-minutes=$4
-seconds=$(($minutes * 60))
+seconds=$4
 qemu_args=$5
 boot_args=$6
 
@@ -167,15 +178,26 @@ then
        exit 0
 fi
 echo "NOTE: $QEMU either did not run or was interactive" > $resdir/console.log
-echo $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
-( $QEMU $qemu_args -m 512 -kernel $resdir/bzImage -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
-qemu_pid=$!
+echo $QEMU $qemu_args -m 512 -kernel $KERNEL -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
+( $QEMU $qemu_args -m 512 -kernel $KERNEL -append "$qemu_append $boot_args"& echo $! > $resdir/qemu_pid; wait `cat  $resdir/qemu_pid`; echo $? > $resdir/qemu-retval ) &
 commandcompleted=0
-echo Monitoring qemu job at pid $qemu_pid
+sleep 10 # Give qemu's pid a chance to reach the file
+if test -s "$resdir/qemu_pid"
+then
+       qemu_pid=`cat "$resdir/qemu_pid"`
+       echo Monitoring qemu job at pid $qemu_pid
+else
+       qemu_pid=""
+	echo Monitoring qemu job at yet-unknown pid
+fi
 while :
 do
+       if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+       then
+               qemu_pid=`cat "$resdir/qemu_pid"`
+       fi
        kruntime=`awk 'BEGIN { print systime() - '"$kstarttime"' }' < /dev/null`
-       if kill -0 $qemu_pid > /dev/null 2>&1
+       if test -z "$qemu_pid" || kill -0 "$qemu_pid" > /dev/null 2>&1
        then
                if test $kruntime -ge $seconds
                then
@@ -195,12 +217,16 @@ do
                                ps -fp $killpid >> $resdir/Warnings 2>&1
                        fi
                else
-                       echo ' ---' `date`: Kernel done
+                       echo ' ---' `date`: "Kernel done"
                fi
                break
        fi
 done
-if test $commandcompleted -eq 0
+if test -z "$qemu_pid" -a -s "$resdir/qemu_pid"
+then
+       qemu_pid=`cat "$resdir/qemu_pid"`
+fi
+if test $commandcompleted -eq 0 -a -n "$qemu_pid"
 then
        echo Grace period for qemu job at pid $qemu_pid
        while :
@@ -220,6 +246,9 @@ then
                fi
                sleep 1
        done
+elif test -z "$qemu_pid"
+then
+       echo Unknown PID, cannot kill qemu command
 fi
 
 parse-torture.sh $resdir/console.log $title
index 4a431767f77a0215096d3cc5421439a857175783..0d598145873e898d388b6e5d411ddc9b4c937897 100755 (executable)
@@ -34,7 +34,7 @@ T=/tmp/kvm.sh.$$
 trap 'rm -rf $T' 0
 mkdir $T
 
-dur=30
+dur=$((30*60))
 dryrun=""
 KVM="`pwd`/tools/testing/selftests/rcutorture"; export KVM
 PATH=${KVM}/bin:$PATH; export PATH
@@ -48,6 +48,7 @@ resdir=""
 configs=""
 cpus=0
 ds=`date +%Y.%m.%d-%H:%M:%S`
+jitter=0
 
 . functions.sh
 
@@ -63,6 +64,7 @@ usage () {
        echo "       --dryrun sched|script"
        echo "       --duration minutes"
        echo "       --interactive"
+       echo "       --jitter N [ maxsleep (us) [ maxspin (us) ] ]"
        echo "       --kmake-arg kernel-make-arguments"
        echo "       --mac nn:nn:nn:nn:nn:nn"
        echo "       --no-initrd"
@@ -116,12 +118,17 @@ do
                ;;
        --duration)
                checkarg --duration "(minutes)" $# "$2" '^[0-9]*$' '^error'
-               dur=$2
+               dur=$(($2*60))
                shift
                ;;
        --interactive)
                TORTURE_QEMU_INTERACTIVE=1; export TORTURE_QEMU_INTERACTIVE
                ;;
+       --jitter)
+               checkarg --jitter "(# threads [ sleep [ spin ] ])" $# "$2" '^-\{,1\}[0-9]\+\( \+[0-9]\+\)\{,2\} *$' '^error$'
+               jitter="$2"
+               shift
+               ;;
        --kmake-arg)
                checkarg --kmake-arg "(kernel make arguments)" $# "$2" '.*' '^error$'
                TORTURE_KMAKE_ARG="$2"
@@ -156,7 +163,7 @@ do
                shift
                ;;
        --torture)
-               checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\)$' '^--'
+               checkarg --torture "(suite name)" "$#" "$2" '^\(lock\|rcu\|rcuperf\)$' '^--'
                TORTURE_SUITE=$2
                shift
                ;;
@@ -299,6 +306,7 @@ awk < $T/cfgcpu.pack \
        -v CONFIGDIR="$CONFIGFRAG/" \
        -v KVM="$KVM" \
        -v ncpus=$cpus \
+       -v jitter="$jitter" \
        -v rd=$resdir/$ds/ \
        -v dur=$dur \
        -v TORTURE_QEMU_ARG="$TORTURE_QEMU_ARG" \
@@ -359,6 +367,16 @@ function dump(first, pastlast, batchnum)
                print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
                print "fi"
        }
+       njitter = 0;
+       split(jitter, ja);
+       if (ja[1] == -1 && ncpus == 0)
+               njitter = 1;
+       else if (ja[1] == -1)
+               njitter = ncpus;
+       else
+               njitter = ja[1];
+       for (j = 0; j < njitter; j++)
+               print "jitter.sh " j " " dur " " ja[2] " " ja[3] "&"
        print "wait"
        print "if test -z \"$TORTURE_BUILDONLY\""
        print "then"
index 39a2c6d7d7ec03c95fee1faed31d1036d08bb14a..17cbe098b115ce9421a6930a3720d5a834cf07ca 100644 (file)
@@ -14,7 +14,7 @@ CONFIG_HOTPLUG_CPU=n
 CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=4
-CONFIG_RCU_FANOUT_LEAF=4
+CONFIG_RCU_FANOUT_LEAF=3
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
index 0fc8a3428938f8fcd3bbb5d0a80aa9ab2161e40d..e34c334304472987f912f22453179c60dd88e46e 100644 (file)
@@ -1 +1 @@
-rcutorture.torture_type=rcu_bh
+rcutorture.torture_type=rcu_bh rcutree.rcu_fanout_leaf=4
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST b/tools/testing/selftests/rcutorture/configs/rcuperf/CFLIST
new file mode 100644 (file)
index 0000000..c9f56cf
--- /dev/null
@@ -0,0 +1 @@
+TREE
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon b/tools/testing/selftests/rcutorture/configs/rcuperf/CFcommon
new file mode 100644 (file)
index 0000000..a09816b
--- /dev/null
@@ -0,0 +1,2 @@
+CONFIG_RCU_PERF_TEST=y
+CONFIG_PRINTK_TIME=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE
new file mode 100644 (file)
index 0000000..a312f67
--- /dev/null
@@ -0,0 +1,20 @@
+CONFIG_SMP=y
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_LOCKING=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54 b/tools/testing/selftests/rcutorture/configs/rcuperf/TREE54
new file mode 100644 (file)
index 0000000..985fb17
--- /dev/null
@@ -0,0 +1,23 @@
+CONFIG_SMP=y
+CONFIG_NR_CPUS=54
+CONFIG_PREEMPT_NONE=n
+CONFIG_PREEMPT_VOLUNTARY=n
+CONFIG_PREEMPT=y
+#CHECK#CONFIG_PREEMPT_RCU=y
+CONFIG_HZ_PERIODIC=n
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ_FULL=n
+CONFIG_RCU_FAST_NO_HZ=n
+CONFIG_RCU_TRACE=n
+CONFIG_HOTPLUG_CPU=n
+CONFIG_SUSPEND=n
+CONFIG_HIBERNATION=n
+CONFIG_RCU_FANOUT=3
+CONFIG_RCU_FANOUT_LEAF=2
+CONFIG_RCU_NOCB_CPU=n
+CONFIG_DEBUG_LOCK_ALLOC=n
+CONFIG_PROVE_LOCKING=n
+CONFIG_RCU_BOOST=n
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_TRACE=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh b/tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
new file mode 100644 (file)
index 0000000..34f2a1b
--- /dev/null
@@ -0,0 +1,52 @@
+#!/bin/bash
+#
+# Torture-suite-dependent shell functions for the rest of the scripts.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, you can access it online at
+# http://www.gnu.org/licenses/gpl-2.0.html.
+#
+# Copyright (C) IBM Corporation, 2015
+#
+# Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+
+# rcuperf_param_nreaders bootparam-string
+#
+# Adds the rcuperf.nreaders module parameter if it is not already specified.
+rcuperf_param_nreaders () {
+       if ! echo "$1" | grep -q "rcuperf.nreaders"
+       then
+               echo rcuperf.nreaders=-1
+       fi
+}
+
+# rcuperf_param_nwriters bootparam-string
+#
+# Adds the rcuperf.nwriters module parameter if it is not already specified.
+rcuperf_param_nwriters () {
+       if ! echo "$1" | grep -q "rcuperf.nwriters"
+       then
+               echo rcuperf.nwriters=-1
+       fi
+}
+
+# per_version_boot_params bootparam-string config-file seconds
+#
+# Adds per-version torture-module parameters to kernels supporting them.
+per_version_boot_params () {
+       echo $1 `rcuperf_param_nreaders "$1"` \
+               `rcuperf_param_nwriters "$1"` \
+               rcuperf.perf_runnable=1 \
+               rcuperf.shutdown=1 \
+               rcuperf.verbose=1
+}
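The per_version_boot_params() helper above only appends rcuperf module parameters that the caller has not already supplied, plus a fixed set of defaults. A quick sketch of checking its output by sourcing the new file directly (the config-file and seconds arguments are unused by this implementation, so placeholders suffice):

# Sketch: show what the new helper emits for an empty boot-parameter string.
. tools/testing/selftests/rcutorture/configs/rcuperf/ver_functions.sh
per_version_boot_params "" /dev/null 1800
# Expected single line of output:
#   rcuperf.nreaders=-1 rcuperf.nwriters=-1 rcuperf.perf_runnable=1 rcuperf.shutdown=1 rcuperf.verbose=1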
diff --git a/tools/testing/selftests/sigaltstack/Makefile b/tools/testing/selftests/sigaltstack/Makefile
new file mode 100644 (file)
index 0000000..56af56e
--- /dev/null
@@ -0,0 +1,8 @@
+CFLAGS = -Wall
+BINARIES = sas
+all: $(BINARIES)
+
+include ../lib.mk
+
+clean:
+       rm -rf $(BINARIES)
diff --git a/tools/testing/selftests/sigaltstack/sas.c b/tools/testing/selftests/sigaltstack/sas.c
new file mode 100644 (file)
index 0000000..1bb0125
--- /dev/null
@@ -0,0 +1,176 @@
+/*
+ * Stas Sergeev <stsp@users.sourceforge.net>
+ *
+ * Test sigaltstack(SS_ONSTACK | SS_AUTODISARM).
+ * If that succeeds, then swapcontext() can be used inside a signal handler safely.
+ *
+ */
+
+#define _GNU_SOURCE
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <ucontext.h>
+#include <alloca.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+
+#ifndef SS_AUTODISARM
+#define SS_AUTODISARM  (1U << 31)
+#endif
+
+static void *sstack, *ustack;
+static ucontext_t uc, sc;
+static const char *msg = "[OK]\tStack preserved";
+static const char *msg2 = "[FAIL]\tStack corrupted";
+struct stk_data {
+       char msg[128];
+       int flag;
+};
+
+void my_usr1(int sig, siginfo_t *si, void *u)
+{
+       char *aa;
+       int err;
+       stack_t stk;
+       struct stk_data *p;
+
+       register unsigned long sp asm("sp");
+
+       if (sp < (unsigned long)sstack ||
+                       sp >= (unsigned long)sstack + SIGSTKSZ) {
+               printf("[FAIL]\tSP is not on sigaltstack\n");
+               exit(EXIT_FAILURE);
+       }
+       /* Put some data on the stack; the other signal handler will try to overwrite it. */
+       aa = alloca(1024);
+       assert(aa);
+       p = (struct stk_data *)(aa + 512);
+       strcpy(p->msg, msg);
+       p->flag = 1;
+       printf("[RUN]\tsignal USR1\n");
+       err = sigaltstack(NULL, &stk);
+       if (err) {
+               perror("[FAIL]\tsigaltstack()");
+               exit(EXIT_FAILURE);
+       }
+       if (stk.ss_flags != SS_DISABLE)
+               printf("[FAIL]\tss_flags=%i, should be SS_DISABLE\n",
+                               stk.ss_flags);
+       else
+               printf("[OK]\tsigaltstack is disabled in sighandler\n");
+       swapcontext(&sc, &uc);
+       printf("%s\n", p->msg);
+       if (!p->flag) {
+               printf("[RUN]\tAborting\n");
+               exit(EXIT_FAILURE);
+       }
+}
+
+void my_usr2(int sig, siginfo_t *si, void *u)
+{
+       char *aa;
+       struct stk_data *p;
+
+       printf("[RUN]\tsignal USR2\n");
+       aa = alloca(1024);
+       /* don't run valgrind on this */
+       /* try to find the data stored by the previous signal handler */
+       p = memmem(aa, 1024, msg, strlen(msg));
+       if (p) {
+               printf("[FAIL]\tsigaltstack re-used\n");
+               /* corrupt the data */
+               strcpy(p->msg, msg2);
+               /* tell the other signal handler that its data is corrupted */
+               p->flag = 0;
+       }
+}
+
+static void switch_fn(void)
+{
+       printf("[RUN]\tswitched to user ctx\n");
+       raise(SIGUSR2);
+       setcontext(&sc);
+}
+
+int main(void)
+{
+       struct sigaction act;
+       stack_t stk;
+       int err;
+
+       sigemptyset(&act.sa_mask);
+       act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+       act.sa_sigaction = my_usr1;
+       sigaction(SIGUSR1, &act, NULL);
+       act.sa_sigaction = my_usr2;
+       sigaction(SIGUSR2, &act, NULL);
+       sstack = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+       if (sstack == MAP_FAILED) {
+               perror("mmap()");
+               return EXIT_FAILURE;
+       }
+
+       err = sigaltstack(NULL, &stk);
+       if (err) {
+               perror("[FAIL]\tsigaltstack()");
+               exit(EXIT_FAILURE);
+       }
+       if (stk.ss_flags == SS_DISABLE) {
+               printf("[OK]\tInitial sigaltstack state was SS_DISABLE\n");
+       } else {
+               printf("[FAIL]\tInitial sigaltstack state was %i; should have been SS_DISABLE\n", stk.ss_flags);
+               return EXIT_FAILURE;
+       }
+
+       stk.ss_sp = sstack;
+       stk.ss_size = SIGSTKSZ;
+       stk.ss_flags = SS_ONSTACK | SS_AUTODISARM;
+       err = sigaltstack(&stk, NULL);
+       if (err) {
+               if (errno == EINVAL) {
+                       printf("[NOTE]\tThe running kernel doesn't support SS_AUTODISARM\n");
+                       /*
+                        * If test cases for the !SS_AUTODISARM variant were
+                        * added, we could still run them.  We don't have any
+                        * test cases like that yet, so just exit and report
+                        * success.
+                        */
+                       return 0;
+               } else {
+                       perror("[FAIL]\tsigaltstack(SS_ONSTACK | SS_AUTODISARM)");
+                       return EXIT_FAILURE;
+               }
+       }
+
+       ustack = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE,
+                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
+       if (ustack == MAP_FAILED) {
+               perror("mmap()");
+               return EXIT_FAILURE;
+       }
+       getcontext(&uc);
+       uc.uc_link = NULL;
+       uc.uc_stack.ss_sp = ustack;
+       uc.uc_stack.ss_size = SIGSTKSZ;
+       makecontext(&uc, switch_fn, 0);
+       raise(SIGUSR1);
+
+       err = sigaltstack(NULL, &stk);
+       if (err) {
+               perror("[FAIL]\tsigaltstack()");
+               exit(EXIT_FAILURE);
+       }
+       if (stk.ss_flags != SS_AUTODISARM) {
+               printf("[FAIL]\tss_flags=%i, should be SS_AUTODISARM\n",
+                               stk.ss_flags);
+               exit(EXIT_FAILURE);
+       }
+       printf("[OK]\tsigaltstack is still SS_AUTODISARM after signal\n");
+
+       printf("[OK]\tTest passed\n");
+       return 0;
+}
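The new selftest builds like any other directory under tools/testing/selftests; a minimal sketch of building and running it by hand (assuming a host toolchain; on kernels without SS_AUTODISARM support the binary prints the [NOTE] line above and still exits 0):

# Sketch: build and run the sigaltstack selftest added by this patch.
cd tools/testing/selftests/sigaltstack
make		# the implicit rule builds "sas" from sas.c with CFLAGS = -Wall
./sas		# prints [RUN]/[OK]/[FAIL] lines; exit status 0 means the test passed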