Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 31 Mar 2019 14:48:58 +0000 (07:48 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 31 Mar 2019 14:48:58 +0000 (07:48 -0700)
Pull locking fixlet from Thomas Gleixner:
 "Trivial update to the maintainers file"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  MAINTAINERS: Remove deleted file from futex file pattern

467 files changed:
.mailmap
Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-mtk.txt [deleted file]
Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt [deleted file]
Documentation/devicetree/bindings/i2c/i2c-stu300.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt [deleted file]
Documentation/devicetree/bindings/i2c/i2c-vt8500.txt [deleted file]
Documentation/devicetree/bindings/i2c/i2c-wmt.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-xscale.txt [deleted file]
Documentation/devicetree/bindings/net/dsa/qca8k.txt
Documentation/devicetree/bindings/serial/mtk-uart.txt
Documentation/filesystems/mount_api.txt
Documentation/i2c/busses/i2c-i801
Documentation/networking/msg_zerocopy.rst
Documentation/networking/netdev-FAQ.rst
Documentation/networking/nf_flowtable.txt
Documentation/networking/snmp_counter.rst
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/boot/dts/bcm2835-rpi-b-rev2.dts
arch/arm/boot/dts/imx6dl-yapp4-common.dtsi
arch/arm/boot/dts/imx6qdl-icore-rqs.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6ull-pinfunc-snvs.h
arch/arm/boot/dts/ste-nomadik-nhk15.dts
arch/arm/configs/imx_v4_v5_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/mach-imx/cpuidle-imx6q.c
arch/arm/mach-imx/mach-imx51.c
arch/arm64/Kconfig.platforms
arch/arm64/boot/dts/nvidia/tegra186.dtsi
arch/arm64/boot/dts/renesas/r8a774c0.dtsi
arch/arm64/boot/dts/renesas/r8a77990.dtsi
arch/arm64/kernel/setup.c
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/lib/memcmp_64.S
arch/powerpc/net/bpf_jit.h
arch/powerpc/net/bpf_jit32.h
arch/powerpc/net/bpf_jit64.h
arch/powerpc/net/bpf_jit_comp64.c
arch/powerpc/platforms/pseries/pseries_energy.c
arch/powerpc/platforms/pseries/ras.c
arch/s390/include/asm/ap.h
arch/s390/include/asm/elf.h
arch/s390/include/asm/lowcore.h
arch/s390/kernel/perf_cpum_cf_diag.c
arch/s390/kernel/smp.c
arch/s390/kernel/vtime.c
block/blk-flush.c
block/blk-mq.c
block/blk-mq.h
drivers/acpi/bus.c
drivers/acpi/cppc_acpi.c
drivers/android/binder.c
drivers/android/binder_alloc.c
drivers/ata/libata-zpodd.c
drivers/block/zram/zram_drv.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/scpi-cpufreq.c
drivers/dma/stm32-mdma.c
drivers/gpio/gpio-adnp.c
drivers/gpio/gpio-aspeed.c
drivers/gpio/gpio-exar.c
drivers/gpio/gpio-mockup.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_file.c
drivers/gpu/drm/i915/gvt/cmd_parser.c
drivers/gpu/drm/i915/gvt/gtt.c
drivers/gpu/drm/i915/gvt/gtt.h
drivers/gpu/drm/i915/gvt/mmio_context.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/selftests/i915_gem_evict.c
drivers/gpu/drm/meson/meson_drv.c
drivers/gpu/drm/meson/meson_dw_hdmi.c
drivers/gpu/drm/rockchip/rockchip_drm_vop.c
drivers/gpu/drm/tegra/hub.c
drivers/gpu/drm/tegra/vic.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vkms/vkms_gem.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-i801.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/io-pgtable-arm-v7s.c
drivers/iommu/iommu.c
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/leds/leds-pca9532.c
drivers/leds/trigger/ledtrig-netdev.c
drivers/misc/habanalabs/command_submission.c
drivers/misc/habanalabs/debugfs.c
drivers/misc/habanalabs/device.c
drivers/misc/habanalabs/goya/goya.c
drivers/misc/habanalabs/habanalabs.h
drivers/misc/habanalabs/hw_queue.c
drivers/misc/habanalabs/memory.c
drivers/misc/habanalabs/mmu.c
drivers/net/Kconfig
drivers/net/dsa/qca8k.c
drivers/net/dsa/qca8k.h
drivers/net/ethernet/3com/3c515.c
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/aquantia/atlantic/aq_ring.c
drivers/net/ethernet/cadence/macb_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/mellanox/mlxsw/core_env.c
drivers/net/ethernet/micrel/ks8851.c
drivers/net/ethernet/micrel/ks8851.h
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
drivers/net/ethernet/realtek/atp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/stmicro/stmmac/ring_mode.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/ti/netcp_ethss.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ieee802154/adf7242.c
drivers/net/ieee802154/mac802154_hwsim.c
drivers/net/phy/Kconfig
drivers/net/phy/broadcom.c
drivers/net/phy/dp83822.c
drivers/net/phy/meson-gxl.c
drivers/net/phy/phy_device.c
drivers/net/tun.c
drivers/net/usb/aqc111.c
drivers/net/usb/cdc_ether.c
drivers/net/vxlan.c
drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
drivers/net/wireless/mediatek/mt76/dma.c
drivers/net/wireless/mediatek/mt76/mac80211.c
drivers/net/wireless/mediatek/mt76/mt76.h
drivers/net/wireless/mediatek/mt76/mt7603/beacon.c
drivers/net/wireless/mediatek/mt76/mt7603/dma.c
drivers/net/wireless/mediatek/mt76/mt7603/init.c
drivers/net/wireless/mediatek/mt76/mt7603/mac.c
drivers/net/wireless/mediatek/mt76/mt7603/main.c
drivers/net/wireless/mediatek/mt76/mt7603/mcu.c
drivers/net/wireless/mediatek/mt76/mt7603/soc.c
drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h
drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
drivers/net/wireless/mediatek/mt76/mt76x02.h
drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c
drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
drivers/net/wireless/mediatek/mt76/mt76x02_mac.h
drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
drivers/net/wireless/mediatek/mt76/mt76x02_phy.c
drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c
drivers/net/wireless/mediatek/mt76/mt76x02_util.c
drivers/net/wireless/mediatek/mt76/mt76x2/init.c
drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h
drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c
drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c
drivers/net/wireless/mediatek/mt76/mt76x2/phy.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
drivers/net/wireless/mediatek/mt76/mt76x2/usb_mac.c
drivers/net/wireless/mediatek/mt76/tx.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/mediatek/mt7601u/usb.c
drivers/nvme/host/multipath.c
drivers/nvme/host/tcp.c
drivers/nvme/target/core.c
drivers/nvme/target/io-cmd-file.c
drivers/parport/daisy.c
drivers/parport/probe.c
drivers/parport/share.c
drivers/pci/pci.h
drivers/pci/pcie/bw_notification.c
drivers/pci/probe.c
drivers/phy/allwinner/phy-sun4i-usb.c
drivers/s390/cio/chsc.c
drivers/s390/cio/vfio_ccw_drv.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/ap_bus.h
drivers/s390/crypto/ap_queue.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fc.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/aacraid/aacraid.h
drivers/scsi/aacraid/commsup.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/sd.c
drivers/soc/bcm/bcm2835-power.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/staging/axis-fifo/Kconfig
drivers/staging/comedi/comedidev.h
drivers/staging/comedi/drivers.c
drivers/staging/comedi/drivers/ni_mio_common.c
drivers/staging/erofs/dir.c
drivers/staging/erofs/unzip_vle.c
drivers/staging/erofs/unzip_vle_lz4.c
drivers/staging/mt7621-dts/gbpc1.dts
drivers/staging/mt7621-dts/mt7621.dtsi
drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt [deleted file]
drivers/staging/mt7621-eth/Kconfig [deleted file]
drivers/staging/mt7621-eth/Makefile [deleted file]
drivers/staging/mt7621-eth/TODO [deleted file]
drivers/staging/mt7621-eth/ethtool.c [deleted file]
drivers/staging/mt7621-eth/ethtool.h [deleted file]
drivers/staging/mt7621-eth/gsw_mt7620.h [deleted file]
drivers/staging/mt7621-eth/gsw_mt7621.c [deleted file]
drivers/staging/mt7621-eth/mdio.c [deleted file]
drivers/staging/mt7621-eth/mdio.h [deleted file]
drivers/staging/mt7621-eth/mdio_mt7620.c [deleted file]
drivers/staging/mt7621-eth/mtk_eth_soc.c [deleted file]
drivers/staging/mt7621-eth/mtk_eth_soc.h [deleted file]
drivers/staging/mt7621-eth/soc_mt7621.c [deleted file]
drivers/staging/mt7621-pci/Kconfig
drivers/staging/octeon/ethernet-mdio.c
drivers/staging/octeon/ethernet.c
drivers/staging/octeon/octeon-ethernet.h
drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
drivers/staging/rtl8188eu/core/rtw_xmit.c
drivers/staging/rtl8188eu/include/rtw_xmit.h
drivers/staging/rtl8712/rtl8712_cmd.c
drivers/staging/rtl8712/rtl8712_cmd.h
drivers/staging/rtl8723bs/core/rtw_xmit.c
drivers/staging/rtl8723bs/include/rtw_xmit.h
drivers/staging/rtlwifi/phydm/rtl_phydm.c
drivers/staging/rtlwifi/rtl8822be/fw.c
drivers/staging/speakup/speakup_soft.c
drivers/staging/speakup/spk_priv.h
drivers/staging/speakup/synth.c
drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
drivers/staging/vt6655/device_main.c
drivers/tty/serial/ar933x_uart.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/kgdboc.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mvebu-uart.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/qcom_geni_serial.c
drivers/tty/serial/sc16is7xx.c
drivers/tty/serial/sh-sci.c
drivers/tty/tty_port.c
drivers/usb/class/cdc-acm.c
drivers/usb/common/common.c
drivers/usb/core/hcd.c
drivers/usb/dwc3/dwc3-pci.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/udc/net2272.c
drivers/usb/gadget/udc/net2280.c
drivers/usb/host/u132-hcd.c
drivers/usb/host/xhci-dbgcap.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-rcar.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.h
drivers/usb/misc/usb251xb.c
drivers/usb/mtu3/Kconfig
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/mos7720.c
drivers/usb/serial/option.c
drivers/usb/typec/tcpm/tcpm.c
drivers/usb/typec/tcpm/wcove.c
drivers/virt/vboxguest/vboxguest_core.c
drivers/virt/vboxguest/vboxguest_core.h
drivers/virt/vboxguest/vboxguest_linux.c
drivers/virt/vboxguest/vboxguest_utils.c
drivers/virt/vboxguest/vboxguest_version.h
drivers/virt/vboxguest/vmmdev.h
fs/afs/fsclient.c
fs/afs/yfsclient.c
fs/btrfs/extent-tree.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/ceph/inode.c
fs/fs_parser.c
fs/io_uring.c
fs/lockd/host.c
fs/locks.c
fs/nfs/client.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/nfs4proc.c
fs/ocfs2/refcounttree.c
fs/open.c
fs/proc/kcore.c
fs/proc/proc_sysctl.c
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/scrub/btree.c
fs/xfs/scrub/dabtree.c
fs/xfs/xfs_discard.c
fs/xfs/xfs_file.c
include/acpi/acoutput.h
include/acpi/platform/aclinux.h
include/linux/atalk.h
include/linux/bpf.h
include/linux/bpf_verifier.h
include/linux/brcmphy.h
include/linux/device.h
include/linux/hugetlb.h
include/linux/kcore.h
include/linux/list.h
include/linux/net.h
include/linux/page-isolation.h
include/linux/parport.h
include/linux/platform_data/gpio/gpio-amd-fch.h
include/linux/sched/signal.h
include/linux/slab.h
include/linux/socket.h
include/linux/vbox_utils.h
include/net/act_api.h
include/net/sch_generic.h
include/net/sctp/checksum.h
include/net/sock.h
include/net/tc_act/tc_gact.h
include/net/xdp_sock.h
include/uapi/linux/bpf.h
include/uapi/linux/vbox_vmmdev_types.h
kernel/bpf/syscall.c
kernel/bpf/verifier.c
kernel/ptrace.c
kernel/trace/ftrace.c
kernel/trace/trace_dynevent.c
kernel/trace/trace_events_hist.c
kernel/watchdog.c
lib/rhashtable.c
lib/sbitmap.c
mm/debug.c
mm/kasan/kasan.h
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/page_alloc.c
mm/page_isolation.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slub.c
mm/sparse.c
net/appletalk/aarp.c
net/appletalk/ddp.c
net/bridge/br_netfilter_hooks.c
net/bridge/br_netfilter_ipv6.c
net/ceph/messenger.c
net/core/devlink.c
net/core/filter.c
net/core/net-sysfs.c
net/dccp/ipv6.c
net/ipv6/netfilter/ip6t_srh.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/mpls/mpls_iptunnel.c
net/ncsi/ncsi-netlink.c
net/netfilter/Kconfig
net/netfilter/nf_conntrack_sip.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_objref.c
net/netfilter/nft_redir.c
net/netfilter/nft_set_rbtree.c
net/netlink/genetlink.c
net/nfc/llcp_sock.c
net/openvswitch/datapath.c
net/packet/af_packet.c
net/rose/rose_subr.c
net/rxrpc/output.c
net/sched/Kconfig
net/sched/act_api.c
net/sched/act_bpf.c
net/sched/act_connmark.c
net/sched/act_csum.c
net/sched/act_gact.c
net/sched/act_ife.c
net/sched/act_ipt.c
net/sched/act_mirred.c
net/sched/act_nat.c
net/sched/act_pedit.c
net/sched/act_police.c
net/sched/act_sample.c
net/sched/act_simple.c
net/sched/act_skbedit.c
net/sched/act_skbmod.c
net/sched/act_tunnel_key.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sched/sch_cake.c
net/sctp/socket.c
net/socket.c
net/strparser/strparser.c
net/sunrpc/clnt.c
net/sunrpc/xprtsock.c
net/tipc/group.c
net/tipc/net.c
net/tipc/node.c
net/tipc/socket.c
net/tipc/topsrv.c
net/xdp/xdp_umem.c
scripts/Makefile.build
scripts/checkpatch.pl
scripts/coccinelle/free/put_device.cocci
scripts/coccinelle/misc/badty.cocci
scripts/kconfig/lxdialog/inputbox.c
scripts/kconfig/nconf.c
scripts/kconfig/nconf.gui.c
scripts/mod/modpost.c
security/Kconfig
security/yama/yama_lsm.c
sound/core/oss/pcm_oss.c
sound/core/pcm_native.c
sound/core/rawmidi.c
sound/core/seq/oss/seq_oss_synth.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_realtek.c
tools/include/uapi/linux/bpf.h
tools/lib/bpf/Makefile
tools/lib/bpf/README.rst
tools/lib/bpf/btf.c
tools/lib/bpf/libbpf.c
tools/lib/bpf/xsk.c
tools/objtool/Makefile
tools/testing/selftests/bpf/bpf_helpers.h
tools/testing/selftests/bpf/prog_tests/map_lock.c
tools/testing/selftests/bpf/prog_tests/spinlock.c
tools/testing/selftests/bpf/progs/test_sock_fields_kern.c
tools/testing/selftests/bpf/test_btf.c
tools/testing/selftests/bpf/test_sock_fields.c
tools/testing/selftests/bpf/verifier/calls.c
tools/testing/selftests/bpf/verifier/ref_tracking.c
tools/testing/selftests/bpf/verifier/sock.c
tools/testing/selftests/tc-testing/tc-tests/actions/bpf.json
tools/testing/selftests/tc-testing/tc-tests/actions/connmark.json
tools/testing/selftests/tc-testing/tc-tests/actions/csum.json
tools/testing/selftests/tc-testing/tc-tests/actions/gact.json
tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
tools/testing/selftests/tc-testing/tc-tests/actions/mirred.json
tools/testing/selftests/tc-testing/tc-tests/actions/nat.json
tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json [new file with mode: 0644]
tools/testing/selftests/tc-testing/tc-tests/actions/police.json
tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
tools/testing/selftests/tc-testing/tc-tests/actions/simple.json
tools/testing/selftests/tc-testing/tc-tests/actions/skbedit.json
tools/testing/selftests/tc-testing/tc-tests/actions/skbmod.json
tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
tools/testing/selftests/tc-testing/tc-tests/actions/vlan.json

index 37e1847c798869a62a797840835341c6af48bbb7..b2cde8668dcc38f85c6f0f1bac9da5fa2e9f63b9 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -224,3 +224,5 @@ Yakir Yang <kuankuan.y@gmail.com> <ykk@rock-chips.com>
 Yusuke Goda <goda.yusuke@renesas.com>
 Gustavo Padovan <gustavo@las.ic.unicamp.br>
 Gustavo Padovan <padovan@profusion.mobi>
+Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
+Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
diff --git a/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt b/Documentation/devicetree/bindings/i2c/i2c-iop3xx.txt
new file mode 100644 (file)
index 0000000..dcc8390
--- /dev/null
@@ -0,0 +1,20 @@
+i2c Controller on XScale platforms such as IOP3xx and IXP4xx
+
+Required properties:
+- compatible : Must be one of
+  "intel,iop3xx-i2c"
+  "intel,ixp4xx-i2c";
+- reg
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Example:
+
+i2c@c8011000 {
+       compatible = "intel,ixp4xx-i2c";
+       reg = <0xc8011000 0x18>;
+       interrupts = <33 IRQ_TYPE_LEVEL_LOW>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt b/Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
new file mode 100644 (file)
index 0000000..ee4c324
--- /dev/null
@@ -0,0 +1,44 @@
+* MediaTek's I2C controller
+
+The MediaTek's I2C controller is used to interface with I2C devices.
+
+Required properties:
+  - compatible: value should be either of the following.
+      "mediatek,mt2701-i2c", "mediatek,mt6577-i2c": for MediaTek MT2701
+      "mediatek,mt2712-i2c": for MediaTek MT2712
+      "mediatek,mt6577-i2c": for MediaTek MT6577
+      "mediatek,mt6589-i2c": for MediaTek MT6589
+      "mediatek,mt7622-i2c": for MediaTek MT7622
+      "mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for MediaTek MT7623
+      "mediatek,mt7629-i2c", "mediatek,mt2712-i2c": for MediaTek MT7629
+      "mediatek,mt8173-i2c": for MediaTek MT8173
+  - reg: physical base address of the controller and dma base, length of memory
+    mapped region.
+  - interrupts: interrupt number to the cpu.
+  - clock-div: the fixed value for frequency divider of clock source in i2c
+    module. Each IC may be different.
+  - clocks: clock name from clock manager
+  - clock-names: Must include "main" and "dma", if enable have-pmic need include
+    "pmic" extra.
+
+Optional properties:
+  - clock-frequency: Frequency in Hz of the bus when transfer, the default value
+    is 100000.
+  - mediatek,have-pmic: platform can control i2c form special pmic side.
+    Only mt6589 and mt8135 support this feature.
+  - mediatek,use-push-pull: IO config use push-pull mode.
+
+Example:
+
+       i2c0: i2c@1100d000 {
+                       compatible = "mediatek,mt6577-i2c";
+                       reg = <0x1100d000 0x70>,
+                             <0x11000300 0x80>;
+                       interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_LOW>;
+                       clock-frequency = <400000>;
+                       mediatek,have-pmic;
+                       clock-div = <16>;
+                       clocks = <&i2c0_ck>, <&ap_dma_ck>;
+                       clock-names = "main", "dma";
+       };
+
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mtk.txt b/Documentation/devicetree/bindings/i2c/i2c-mtk.txt
deleted file mode 100644 (file)
index ee4c324..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-* MediaTek's I2C controller
-
-The MediaTek's I2C controller is used to interface with I2C devices.
-
-Required properties:
-  - compatible: value should be either of the following.
-      "mediatek,mt2701-i2c", "mediatek,mt6577-i2c": for MediaTek MT2701
-      "mediatek,mt2712-i2c": for MediaTek MT2712
-      "mediatek,mt6577-i2c": for MediaTek MT6577
-      "mediatek,mt6589-i2c": for MediaTek MT6589
-      "mediatek,mt7622-i2c": for MediaTek MT7622
-      "mediatek,mt7623-i2c", "mediatek,mt6577-i2c": for MediaTek MT7623
-      "mediatek,mt7629-i2c", "mediatek,mt2712-i2c": for MediaTek MT7629
-      "mediatek,mt8173-i2c": for MediaTek MT8173
-  - reg: physical base address of the controller and dma base, length of memory
-    mapped region.
-  - interrupts: interrupt number to the cpu.
-  - clock-div: the fixed value for frequency divider of clock source in i2c
-    module. Each IC may be different.
-  - clocks: clock name from clock manager
-  - clock-names: Must include "main" and "dma", if enable have-pmic need include
-    "pmic" extra.
-
-Optional properties:
-  - clock-frequency: Frequency in Hz of the bus when transfer, the default value
-    is 100000.
-  - mediatek,have-pmic: platform can control i2c form special pmic side.
-    Only mt6589 and mt8135 support this feature.
-  - mediatek,use-push-pull: IO config use push-pull mode.
-
-Example:
-
-       i2c0: i2c@1100d000 {
-                       compatible = "mediatek,mt6577-i2c";
-                       reg = <0x1100d000 0x70>,
-                             <0x11000300 0x80>;
-                       interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_LOW>;
-                       clock-frequency = <400000>;
-                       mediatek,have-pmic;
-                       clock-div = <16>;
-                       clocks = <&i2c0_ck>, <&ap_dma_ck>;
-                       clock-names = "main", "dma";
-       };
-
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt b/Documentation/devicetree/bindings/i2c/i2c-st-ddci2c.txt
deleted file mode 100644 (file)
index bd81a48..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-ST Microelectronics DDC I2C
-
-Required properties :
-- compatible : Must be "st,ddci2c"
-- reg: physical base address of the controller and length of memory mapped
-     region.
-- interrupts: interrupt number to the cpu.
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties:
-- Child nodes conforming to i2c bus binding
-
-Examples :
-
diff --git a/Documentation/devicetree/bindings/i2c/i2c-stu300.txt b/Documentation/devicetree/bindings/i2c/i2c-stu300.txt
new file mode 100644 (file)
index 0000000..bd81a48
--- /dev/null
@@ -0,0 +1,15 @@
+ST Microelectronics DDC I2C
+
+Required properties :
+- compatible : Must be "st,ddci2c"
+- reg: physical base address of the controller and length of memory mapped
+     region.
+- interrupts: interrupt number to the cpu.
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Examples :
+
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sun6i-p2wi.txt
new file mode 100644 (file)
index 0000000..49df005
--- /dev/null
@@ -0,0 +1,41 @@
+
+* Allwinner P2WI (Push/Pull 2 Wire Interface) controller
+
+Required properties :
+
+ - reg             : Offset and length of the register set for the device.
+ - compatible      : Should one of the following:
+                     - "allwinner,sun6i-a31-p2wi"
+ - interrupts      : The interrupt line connected to the P2WI peripheral.
+ - clocks          : The gate clk connected to the P2WI peripheral.
+ - resets          : The reset line connected to the P2WI peripheral.
+
+Optional properties :
+
+ - clock-frequency : Desired P2WI bus clock frequency in Hz. If not set the
+default frequency is 100kHz
+
+A P2WI may contain one child node encoding a P2WI slave device.
+
+Slave device properties:
+  Required properties:
+   - reg           : the I2C slave address used during the initialization
+                     process to switch from I2C to P2WI mode
+
+Example:
+
+       p2wi@1f03400 {
+               compatible = "allwinner,sun6i-a31-p2wi";
+               reg = <0x01f03400 0x400>;
+               interrupts = <0 39 4>;
+               clocks = <&apb0_gates 3>;
+               clock-frequency = <6000000>;
+               resets = <&apb0_rst 3>;
+
+               axp221: pmic@68 {
+                       compatible = "x-powers,axp221";
+                       reg = <0x68>;
+
+                       /* ... */
+               };
+       };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
deleted file mode 100644 (file)
index 49df005..0000000
+++ /dev/null
@@ -1,41 +0,0 @@
-
-* Allwinner P2WI (Push/Pull 2 Wire Interface) controller
-
-Required properties :
-
- - reg             : Offset and length of the register set for the device.
- - compatible      : Should one of the following:
-                     - "allwinner,sun6i-a31-p2wi"
- - interrupts      : The interrupt line connected to the P2WI peripheral.
- - clocks          : The gate clk connected to the P2WI peripheral.
- - resets          : The reset line connected to the P2WI peripheral.
-
-Optional properties :
-
- - clock-frequency : Desired P2WI bus clock frequency in Hz. If not set the
-default frequency is 100kHz
-
-A P2WI may contain one child node encoding a P2WI slave device.
-
-Slave device properties:
-  Required properties:
-   - reg           : the I2C slave address used during the initialization
-                     process to switch from I2C to P2WI mode
-
-Example:
-
-       p2wi@1f03400 {
-               compatible = "allwinner,sun6i-a31-p2wi";
-               reg = <0x01f03400 0x400>;
-               interrupts = <0 39 4>;
-               clocks = <&apb0_gates 3>;
-               clock-frequency = <6000000>;
-               resets = <&apb0_rst 3>;
-
-               axp221: pmic@68 {
-                       compatible = "x-powers,axp221";
-                       reg = <0x68>;
-
-                       /* ... */
-               };
-       };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt b/Documentation/devicetree/bindings/i2c/i2c-vt8500.txt
deleted file mode 100644 (file)
index 94a425e..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-* Wondermedia I2C Controller
-
-Required properties :
-
- - compatible : should be "wm,wm8505-i2c"
- - reg : Offset and length of the register set for the device
- - interrupts : <IRQ> where IRQ is the interrupt number
- - clocks : phandle to the I2C clock source
-
-Optional properties :
-
- - clock-frequency : desired I2C bus clock frequency in Hz.
-       Valid values are 100000 and 400000.
-       Default to 100000 if not specified, or invalid value.
-
-Example :
-
-       i2c_0: i2c@d8280000 {
-               compatible = "wm,wm8505-i2c";
-               reg = <0xd8280000 0x1000>;
-               interrupts = <19>;
-               clocks = <&clki2c0>;
-               clock-frequency = <400000>;
-       };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-wmt.txt b/Documentation/devicetree/bindings/i2c/i2c-wmt.txt
new file mode 100644 (file)
index 0000000..94a425e
--- /dev/null
@@ -0,0 +1,24 @@
+* Wondermedia I2C Controller
+
+Required properties :
+
+ - compatible : should be "wm,wm8505-i2c"
+ - reg : Offset and length of the register set for the device
+ - interrupts : <IRQ> where IRQ is the interrupt number
+ - clocks : phandle to the I2C clock source
+
+Optional properties :
+
+ - clock-frequency : desired I2C bus clock frequency in Hz.
+       Valid values are 100000 and 400000.
+       Default to 100000 if not specified, or invalid value.
+
+Example :
+
+       i2c_0: i2c@d8280000 {
+               compatible = "wm,wm8505-i2c";
+               reg = <0xd8280000 0x1000>;
+               interrupts = <19>;
+               clocks = <&clki2c0>;
+               clock-frequency = <400000>;
+       };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-xscale.txt b/Documentation/devicetree/bindings/i2c/i2c-xscale.txt
deleted file mode 100644 (file)
index dcc8390..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-i2c Controller on XScale platforms such as IOP3xx and IXP4xx
-
-Required properties:
-- compatible : Must be one of
-  "intel,iop3xx-i2c"
-  "intel,ixp4xx-i2c";
-- reg
-- #address-cells = <1>;
-- #size-cells = <0>;
-
-Optional properties:
-- Child nodes conforming to i2c bus binding
-
-Example:
-
-i2c@c8011000 {
-       compatible = "intel,ixp4xx-i2c";
-       reg = <0xc8011000 0x18>;
-       interrupts = <33 IRQ_TYPE_LEVEL_LOW>;
-};
index bbcb255c3150230978fba796b320a71c206ddbad..93a7469e70d4131fbc2d7f2daffd1917020709ee 100644 (file)
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.txt
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.txt
@@ -12,10 +12,15 @@ Required properties:
 Subnodes:
 
 The integrated switch subnode should be specified according to the binding
-described in dsa/dsa.txt. As the QCA8K switches do not have a N:N mapping of
-port and PHY id, each subnode describing a port needs to have a valid phandle
-referencing the internal PHY connected to it. The CPU port of this switch is
-always port 0.
+described in dsa/dsa.txt. If the QCA8K switch is connect to a SoC's external
+mdio-bus each subnode describing a port needs to have a valid phandle
+referencing the internal PHY it is connected to. This is because there's no
+N:N mapping of port and PHY id.
+
+Don't use mixed external and internal mdio-bus configurations, as this is
+not supported by the hardware.
+
+The CPU port of this switch is always port 0.
 
 A CPU port node has the following optional node:
 
@@ -31,8 +36,9 @@ For QCA8K the 'fixed-link' sub-node supports only the following properties:
 - 'full-duplex' (boolean, optional), to indicate that full duplex is
   used. When absent, half duplex is assumed.
 
-Example:
+Examples:
 
+for the external mdio-bus configuration:
 
        &mdio0 {
                phy_port1: phy@0 {
@@ -55,12 +61,12 @@ Example:
                        reg = <4>;
                };
 
-               switch0@0 {
+               switch@10 {
                        compatible = "qca,qca8337";
                        #address-cells = <1>;
                        #size-cells = <0>;
 
-                       reg = <0>;
+                       reg = <0x10>;
 
                        ports {
                                #address-cells = <1>;
@@ -108,3 +114,56 @@ Example:
                        };
                };
        };
+
+for the internal master mdio-bus configuration:
+
+       &mdio0 {
+               switch@10 {
+                       compatible = "qca,qca8337";
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       reg = <0x10>;
+
+                       ports {
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+
+                               port@0 {
+                                       reg = <0>;
+                                       label = "cpu";
+                                       ethernet = <&gmac1>;
+                                       phy-mode = "rgmii";
+                                       fixed-link {
+                                               speed = 1000;
+                                               full-duplex;
+                                       };
+                               };
+
+                               port@1 {
+                                       reg = <1>;
+                                       label = "lan1";
+                               };
+
+                               port@2 {
+                                       reg = <2>;
+                                       label = "lan2";
+                               };
+
+                               port@3 {
+                                       reg = <3>;
+                                       label = "lan3";
+                               };
+
+                               port@4 {
+                                       reg = <4>;
+                                       label = "lan4";
+                               };
+
+                               port@5 {
+                                       reg = <5>;
+                                       label = "wan";
+                               };
+                       };
+               };
+       };
index 742cb470595ba4d7e2a3e467328d33a8bf5335b5..bcfb13194f16364b0ac77a4391a26c0bb34206d0 100644 (file)
--- a/Documentation/devicetree/bindings/serial/mtk-uart.txt
+++ b/Documentation/devicetree/bindings/serial/mtk-uart.txt
@@ -16,6 +16,7 @@ Required properties:
   * "mediatek,mt8127-uart" for MT8127 compatible UARTS
   * "mediatek,mt8135-uart" for MT8135 compatible UARTS
   * "mediatek,mt8173-uart" for MT8173 compatible UARTS
+  * "mediatek,mt8183-uart", "mediatek,mt6577-uart" for MT8183 compatible UARTS
   * "mediatek,mt6577-uart" for MT6577 and all of the above
 
 - reg: The base address of the UART register bank.
index 944d1965e917e9a91496637ab484d5197d68223d..00ff0cfccfa71cdce0d02ddd8608cf962ee99308 100644 (file)
--- a/Documentation/filesystems/mount_api.txt
+++ b/Documentation/filesystems/mount_api.txt
@@ -12,11 +12,13 @@ CONTENTS
 
  (4) Filesystem context security.
 
- (5) VFS filesystem context operations.
+ (5) VFS filesystem context API.
 
- (6) Parameter description.
+ (6) Superblock creation helpers.
 
- (7) Parameter helper functions.
+ (7) Parameter description.
+
+ (8) Parameter helper functions.
 
 
 ========
@@ -41,12 +43,15 @@ The creation of new mounts is now to be done in a multistep process:
 
  (7) Destroy the context.
 
-To support this, the file_system_type struct gains a new field:
+To support this, the file_system_type struct gains two new fields:
 
        int (*init_fs_context)(struct fs_context *fc);
+       const struct fs_parameter_description *parameters;
 
-which is invoked to set up the filesystem-specific parts of a filesystem
-context, including the additional space.
+The first is invoked to set up the filesystem-specific parts of a filesystem
+context, including the additional space, and the second points to the
+parameter description for validation at registration time and querying by a
+future system call.
 
 Note that security initialisation is done *after* the filesystem is called so
 that the namespaces may be adjusted first.
@@ -73,9 +78,9 @@ context.  This is represented by the fs_context structure:
                void                    *s_fs_info;
                unsigned int            sb_flags;
                unsigned int            sb_flags_mask;
+               unsigned int            s_iflags;
+               unsigned int            lsm_flags;
                enum fs_context_purpose purpose:8;
-               bool                    sloppy:1;
-               bool                    silent:1;
                ...
        };
 
@@ -141,6 +146,10 @@ The fs_context fields are as follows:
 
      Which bits SB_* flags are to be set/cleared in super_block::s_flags.
 
+ (*) unsigned int s_iflags
+
+     These will be bitwise-OR'd with s->s_iflags when a superblock is created.
+
  (*) enum fs_context_purpose
 
      This indicates the purpose for which the context is intended.  The
@@ -150,17 +159,6 @@ The fs_context fields are as follows:
        FS_CONTEXT_FOR_SUBMOUNT         -- New automatic submount of extant mount
        FS_CONTEXT_FOR_RECONFIGURE      -- Change an existing mount
 
- (*) bool sloppy
- (*) bool silent
-
-     These are set if the sloppy or silent mount options are given.
-
-     [NOTE] sloppy is probably unnecessary when userspace passes over one
-     option at a time since the error can just be ignored if userspace deems it
-     to be unimportant.
-
-     [NOTE] silent is probably redundant with sb_flags & SB_SILENT.
-
 The mount context is created by calling vfs_new_fs_context() or
 vfs_dup_fs_context() and is destroyed with put_fs_context().  Note that the
 structure is not refcounted.
@@ -342,28 +340,47 @@ number of operations used by the new mount code for this purpose:
      It should return 0 on success or a negative error code on failure.
 
 
-=================================
-VFS FILESYSTEM CONTEXT OPERATIONS
-=================================
+==========================
+VFS FILESYSTEM CONTEXT API
+==========================
 
-There are four operations for creating a filesystem context and
-one for destroying a context:
+There are four operations for creating a filesystem context and one for
+destroying a context:
 
- (*) struct fs_context *vfs_new_fs_context(struct file_system_type *fs_type,
-                                          struct dentry *reference,
-                                          unsigned int sb_flags,
-                                          unsigned int sb_flags_mask,
-                                          enum fs_context_purpose purpose);
+ (*) struct fs_context *fs_context_for_mount(
+               struct file_system_type *fs_type,
+               unsigned int sb_flags);
 
-     Create a filesystem context for a given filesystem type and purpose.  This
-     allocates the filesystem context, sets the superblock flags, initialises
-     the security and calls fs_type->init_fs_context() to initialise the
-     filesystem private data.
+     Allocate a filesystem context for the purpose of setting up a new mount,
+     whether that be with a new superblock or sharing an existing one.  This
+     sets the superblock flags, initialises the security and calls
+     fs_type->init_fs_context() to initialise the filesystem private data.
 
-     reference can be NULL or it may indicate the root dentry of a superblock
-     that is going to be reconfigured (FS_CONTEXT_FOR_RECONFIGURE) or
-     the automount point that triggered a submount (FS_CONTEXT_FOR_SUBMOUNT).
-     This is provided as a source of namespace information.
+     fs_type specifies the filesystem type that will manage the context and
+     sb_flags presets the superblock flags stored therein.
+
+ (*) struct fs_context *fs_context_for_reconfigure(
+               struct dentry *dentry,
+               unsigned int sb_flags,
+               unsigned int sb_flags_mask);
+
+     Allocate a filesystem context for the purpose of reconfiguring an
+     existing superblock.  dentry provides a reference to the superblock to be
+     configured.  sb_flags and sb_flags_mask indicate which superblock flags
+     need changing and to what.
+
+ (*) struct fs_context *fs_context_for_submount(
+               struct file_system_type *fs_type,
+               struct dentry *reference);
+
+     Allocate a filesystem context for the purpose of creating a new mount for
+     an automount point or other derived superblock.  fs_type specifies the
+     filesystem type that will manage the context and the reference dentry
+     supplies the parameters.  Namespaces are propagated from the reference
+     dentry's superblock also.
+
+     Note that it's not a requirement that the reference dentry be of the same
+     filesystem type as fs_type.
 
  (*) struct fs_context *vfs_dup_fs_context(struct fs_context *src_fc);
 
@@ -390,20 +407,6 @@ context pointer or a negative error code.
 For the remaining operations, if an error occurs, a negative error code will be
 returned.
 
- (*) int vfs_get_tree(struct fs_context *fc);
-
-     Get or create the mountable root and superblock, using the parameters in
-     the filesystem context to select/configure the superblock.  This invokes
-     the ->validate() op and then the ->get_tree() op.
-
-     [NOTE] ->validate() could perhaps be rolled into ->get_tree() and
-     ->reconfigure().
-
- (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
-
-     Create a mount given the parameters in the specified filesystem context.
-     Note that this does not attach the mount to anything.
-
  (*) int vfs_parse_fs_param(struct fs_context *fc,
                            struct fs_parameter *param);
 
@@ -432,17 +435,80 @@ returned.
      clear the pointer, but then becomes responsible for disposing of the
      object.
 
- (*) int vfs_parse_fs_string(struct fs_context *fc, char *key,
+ (*) int vfs_parse_fs_string(struct fs_context *fc, const char *key,
                             const char *value, size_t v_size);
 
-     A wrapper around vfs_parse_fs_param() that just passes a constant string.
+     A wrapper around vfs_parse_fs_param() that copies the value string it is
+     passed.
 
  (*) int generic_parse_monolithic(struct fs_context *fc, void *data);
 
      Parse a sys_mount() data page, assuming the form to be a text list
      consisting of key[=val] options separated by commas.  Each item in the
      list is passed to vfs_mount_option().  This is the default when the
-     ->parse_monolithic() operation is NULL.
+     ->parse_monolithic() method is NULL.
+
+ (*) int vfs_get_tree(struct fs_context *fc);
+
+     Get or create the mountable root and superblock, using the parameters in
+     the filesystem context to select/configure the superblock.  This invokes
+     the ->get_tree() method.
+
+ (*) struct vfsmount *vfs_create_mount(struct fs_context *fc);
+
+     Create a mount given the parameters in the specified filesystem context.
+     Note that this does not attach the mount to anything.
+
+
+===========================
+SUPERBLOCK CREATION HELPERS
+===========================
+
+A number of VFS helpers are available for use by filesystems for the creation
+or looking up of superblocks.
+
+ (*) struct super_block *
+     sget_fc(struct fs_context *fc,
+            int (*test)(struct super_block *sb, struct fs_context *fc),
+            int (*set)(struct super_block *sb, struct fs_context *fc));
+
+     This is the core routine.  If test is non-NULL, it searches for an
+     existing superblock matching the criteria held in the fs_context, using
+     the test function to match them.  If no match is found, a new superblock
+     is created and the set function is called to set it up.
+
+     Prior to the set function being called, fc->s_fs_info will be transferred
+     to sb->s_fs_info - and fc->s_fs_info will be cleared if set returns
+     success (ie. 0).
+
+The following helpers all wrap sget_fc():
+
+ (*) int vfs_get_super(struct fs_context *fc,
+                      enum vfs_get_super_keying keying,
+                      int (*fill_super)(struct super_block *sb,
+                                        struct fs_context *fc))
+
+     This creates/looks up a deviceless superblock.  The keying indicates how
+     many superblocks of this type may exist and in what manner they may be
+     shared:
+
+       (1) vfs_get_single_super
+
+           Only one such superblock may exist in the system.  Any further
+           attempt to get a new superblock gets this one (and any parameter
+           differences are ignored).
+
+       (2) vfs_get_keyed_super
+
+           Multiple superblocks of this type may exist and they're keyed on
+           their s_fs_info pointer (for example this may refer to a
+           namespace).
+
+       (3) vfs_get_independent_super
+
+           Multiple independent superblocks of this type may exist.  This
+           function never matches an existing one and always creates a new
+           one.
 
 
 =====================
@@ -454,35 +520,22 @@ There's a core description struct that links everything together:
 
        struct fs_parameter_description {
                const char      name[16];
-               u8              nr_params;
-               u8              nr_alt_keys;
-               u8              nr_enums;
-               bool            ignore_unknown;
-               bool            no_source;
-               const char *const *keys;
-               const struct constant_table *alt_keys;
                const struct fs_parameter_spec *specs;
                const struct fs_parameter_enum *enums;
        };
 
 For example:
 
-       enum afs_param {
+       enum {
                Opt_autocell,
                Opt_bar,
                Opt_dyn,
                Opt_foo,
                Opt_source,
-               nr__afs_params
        };
 
        static const struct fs_parameter_description afs_fs_parameters = {
                .name           = "kAFS",
-               .nr_params      = nr__afs_params,
-               .nr_alt_keys    = ARRAY_SIZE(afs_param_alt_keys),
-               .nr_enums       = ARRAY_SIZE(afs_param_enums),
-               .keys           = afs_param_keys,
-               .alt_keys       = afs_param_alt_keys,
                .specs          = afs_param_specs,
                .enums          = afs_param_enums,
        };
@@ -494,28 +547,24 @@ The members are as follows:
      The name to be used in error messages generated by the parse helper
      functions.
 
- (2) u8 nr_params;
-
-     The number of discrete parameter identifiers.  This indicates the number
-     of elements in the ->types[] array and also limits the values that may be
-     used in the values that the ->keys[] array maps to.
-
-     It is expected that, for example, two parameters that are related, say
-     "acl" and "noacl" with have the same ID, but will be flagged to indicate
-     that one is the inverse of the other.  The value can then be picked out
-     from the parse result.
+ (2) const struct fs_parameter_specification *specs;
 
- (3) const struct fs_parameter_specification *specs;
+     Table of parameter specifications, terminated with a null entry, where the
+     entries are of type:
 
-     Table of parameter specifications, where the entries are of type:
-
-       struct fs_parameter_type {
-               enum fs_parameter_spec  type:8;
-               u8                      flags;
+       struct fs_parameter_spec {
+               const char              *name;
+               u8                      opt;
+               enum fs_parameter_type  type:8;
+               unsigned short          flags;
        };
 
-     and the parameter identifier is the index to the array.  'type' indicates
-     the desired value type and must be one of:
+     The 'name' field is a string to match exactly to the parameter key (no
+     wildcards, patterns and no case-independence) and 'opt' is the value that
+     will be returned by the fs_parser() function in the case of a successful
+     match.
+
+     The 'type' field indicates the desired value type and must be one of:
 
        TYPE NAME               EXPECTED VALUE          RESULT IN
        ======================= ======================= =====================
@@ -525,85 +574,65 @@ The members are as follows:
        fs_param_is_u32_octal   32-bit octal int        result->uint_32
        fs_param_is_u32_hex     32-bit hex int          result->uint_32
        fs_param_is_s32         32-bit signed int       result->int_32
+       fs_param_is_u64         64-bit unsigned int     result->uint_64
        fs_param_is_enum        Enum value name         result->uint_32
        fs_param_is_string      Arbitrary string        param->string
        fs_param_is_blob        Binary blob             param->blob
        fs_param_is_blockdev    Blockdev path           * Needs lookup
        fs_param_is_path        Path                    * Needs lookup
-       fs_param_is_fd          File descriptor         param->file
-
-     And each parameter can be qualified with 'flags':
-
-       fs_param_v_optional     The value is optional
-       fs_param_neg_with_no    If key name is prefixed with "no", it is false
-       fs_param_neg_with_empty If value is "", it is false
-       fs_param_deprecated     The parameter is deprecated.
-
-     For example:
-
-       static const struct fs_parameter_spec afs_param_specs[nr__afs_params] = {
-               [Opt_autocell]  = { fs_param_is flag },
-               [Opt_bar]       = { fs_param_is_enum },
-               [Opt_dyn]       = { fs_param_is flag },
-               [Opt_foo]       = { fs_param_is_bool, fs_param_neg_with_no },
-               [Opt_source]    = { fs_param_is_string },
-       };
+       fs_param_is_fd          File descriptor         result->int_32
 
      Note that if the value is of fs_param_is_bool type, fs_parse() will try
      to match any string value against "0", "1", "no", "yes", "false", "true".
 
-     [!] NOTE that the table must be sorted according to primary key name so
-        that ->keys[] is also sorted.
-
- (4) const char *const *keys;
-
-     Table of primary key names for the parameters.  There must be one entry
-     per defined parameter.  The table is optional if ->nr_params is 0.  The
-     table is just an array of names e.g.:
+     Each parameter can also be qualified with 'flags':
 
-       static const char *const afs_param_keys[nr__afs_params] = {
-               [Opt_autocell]  = "autocell",
-               [Opt_bar]       = "bar",
-               [Opt_dyn]       = "dyn",
-               [Opt_foo]       = "foo",
-               [Opt_source]    = "source",
-       };
-
-     [!] NOTE that the table must be sorted such that the table can be searched
-        with bsearch() using strcmp().  This means that the Opt_* values must
-        correspond to the entries in this table.
-
- (5) const struct constant_table *alt_keys;
-     u8 nr_alt_keys;
-
-     Table of additional key names and their mappings to parameter ID plus the
-     number of elements in the table.  This is optional.  The table is just an
-     array of { name, integer } pairs, e.g.:
+       fs_param_v_optional     The value is optional
+       fs_param_neg_with_no    result->negated set if key is prefixed with "no"
+       fs_param_neg_with_empty result->negated set if value is ""
+       fs_param_deprecated     The parameter is deprecated.
 
-       static const struct constant_table afs_param_keys[] = {
-               { "baz",        Opt_bar },
-               { "dynamic",    Opt_dyn },
+     These are wrapped with a number of convenience wrappers:
+
+       MACRO                   SPECIFIES
+       ======================= ===============================================
+       fsparam_flag()          fs_param_is_flag
+       fsparam_flag_no()       fs_param_is_flag, fs_param_neg_with_no
+       fsparam_bool()          fs_param_is_bool
+       fsparam_u32()           fs_param_is_u32
+       fsparam_u32oct()        fs_param_is_u32_octal
+       fsparam_u32hex()        fs_param_is_u32_hex
+       fsparam_s32()           fs_param_is_s32
+       fsparam_u64()           fs_param_is_u64
+       fsparam_enum()          fs_param_is_enum
+       fsparam_string()        fs_param_is_string
+       fsparam_blob()          fs_param_is_blob
+       fsparam_bdev()          fs_param_is_blockdev
+       fsparam_path()          fs_param_is_path
+       fsparam_fd()            fs_param_is_fd
+
+     all of which take two arguments, name string and option number - for
+     example:
+
+       static const struct fs_parameter_spec afs_param_specs[] = {
+               fsparam_flag    ("autocell",    Opt_autocell),
+               fsparam_flag    ("dyn",         Opt_dyn),
+               fsparam_string  ("source",      Opt_source),
+               fsparam_flag_no ("foo",         Opt_foo),
+               {}
        };
 
-     [!] NOTE that the table must be sorted such that strcmp() can be used with
-        bsearch() to search the entries.
-
-     The parameter ID can also be fs_param_key_removed to indicate that a
-     deprecated parameter has been removed and that an error will be given.
-     This differs from fs_param_deprecated where the parameter may still have
-     an effect.
-
-     Further, the behaviour of the parameter may differ when an alternate name
-     is used (for instance with NFS, "v3", "v4.2", etc. are alternate names).
+     An addition macro, __fsparam() is provided that takes an additional pair
+     of arguments to specify the type and the flags for anything that doesn't
+     match one of the above macros.
 
  (6) const struct fs_parameter_enum *enums;
-     u8 nr_enums;
 
-     Table of enum value names to integer mappings and the number of elements
-     stored therein.  This is of type:
+     Table of enum value names to integer mappings, terminated with a null
+     entry.  This is of type:
 
        struct fs_parameter_enum {
-               u8              param_id;
+               u8              opt;
                char            name[14];
                u8              value;
        };
@@ -621,11 +650,6 @@ The members are as follows:
      try to look the value up in the enum table and the result will be stored
      in the parse result.
 
- (7) bool no_source;
-
-     If this is set, fs_parse() will ignore any "source" parameter and not
-     pass it to the filesystem.
-
 The parser should be pointed to by the parser pointer in the file_system_type
 struct as this will provide validation on registration (if
 CONFIG_VALIDATE_FS_PARSER=y) and will allow the description to be queried from
@@ -650,9 +674,8 @@ process the parameters it is given.
                int             value;
        };
 
-     and it must be sorted such that it can be searched using bsearch() using
-     strcmp().  If a match is found, the corresponding value is returned.  If a
-     match isn't found, the not_found value is returned instead.
+     If a match is found, the corresponding value is returned.  If a match
+     isn't found, the not_found value is returned instead.
 
  (*) bool validate_constant_table(const struct constant_table *tbl,
                                  size_t tbl_size,
@@ -665,36 +688,36 @@ process the parameters it is given.
      should just be set to lie inside the low-to-high range.
 
      If all is good, true is returned.  If the table is invalid, errors are
-     logged to dmesg, the stack is dumped and false is returned.
+     logged to dmesg and false is returned.
+
+ (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+
+     This performs some validation checks on a parameter description.  It
+     returns true if the description is good and false if it is not.  It will
+     log errors to dmesg if validation fails.
 
  (*) int fs_parse(struct fs_context *fc,
-                 const struct fs_param_parser *parser,
+                 const struct fs_parameter_description *desc,
                  struct fs_parameter *param,
-                 struct fs_param_parse_result *result);
+                 struct fs_parse_result *result);
 
      This is the main interpreter of parameters.  It uses the parameter
-     description (parser) to look up the name of the parameter to use and to
-     convert that to a parameter ID (stored in result->key).
+     description to look up a parameter by key name and to convert that to an
+     option number (which it returns).
 
      If successful, and if the parameter type indicates the result is a
      boolean, integer or enum type, the value is converted by this function and
-     the result stored in result->{boolean,int_32,uint_32}.
+     the result stored in result->{boolean,int_32,uint_32,uint_64}.
 
      If a match isn't initially made, the key is prefixed with "no" and no
      value is present then an attempt will be made to look up the key with the
      prefix removed.  If this matches a parameter for which the type has flag
-     fs_param_neg_with_no set, then a match will be made and the value will be
-     set to false/0/NULL.
-
-     If the parameter is successfully matched and, optionally, parsed
-     correctly, 1 is returned.  If the parameter isn't matched and
-     parser->ignore_unknown is set, then 0 is returned.  Otherwise -EINVAL is
-     returned.
-
- (*) bool fs_validate_description(const struct fs_parameter_description *desc);
+     fs_param_neg_with_no set, then a match will be made and result->negated
+     will be set to true.
 
-     This is validates the parameter description.  It returns true if the
-     description is good and false if it is not.
+     If the parameter isn't matched, -ENOPARAM will be returned; if the
+     parameter is matched, but the value is erroneous, -EINVAL will be
+     returned; otherwise the parameter's option number will be returned.
 
  (*) int fs_lookup_param(struct fs_context *fc,
                         struct fs_parameter *value,
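
As an aside for readers following the mount_api.txt rewrite above: a minimal sketch
of how a filesystem might wire together the documented pieces (the fsparam_*() spec
helpers, struct fs_parameter_description, fs_parse() and struct fs_parse_result)
could look like the fragment below.  It is only an illustration built from the
description in that document; the "myfs" filesystem and all myfs_* names are
invented, and error handling is reduced to the bare minimum.

	#include <linux/fs_context.h>
	#include <linux/fs_parser.h>

	enum { Opt_bar, Opt_foo, Opt_source };

	static const struct fs_parameter_spec myfs_param_specs[] = {
		fsparam_u32	("bar",		Opt_bar),
		fsparam_flag_no	("foo",		Opt_foo),
		fsparam_string	("source",	Opt_source),
		{}
	};

	static const struct fs_parameter_description myfs_fs_parameters = {
		.name	= "myfs",
		.specs	= myfs_param_specs,
	};

	static int myfs_parse_param(struct fs_context *fc,
				    struct fs_parameter *param)
	{
		struct fs_parse_result result;
		int opt;

		/* Returns the matched option number, -ENOPARAM or -EINVAL. */
		opt = fs_parse(fc, &myfs_fs_parameters, param, &result);
		if (opt < 0)
			return opt;

		switch (opt) {
		case Opt_bar:
			/* 32-bit value already converted into result.uint_32. */
			break;
		case Opt_foo:
			/* "nofoo" arrives here with result.negated set. */
			break;
		case Opt_source:
			/*
			 * A real filesystem would save param->string in its
			 * private fc data, then clear the pointer so that it
			 * becomes responsible for freeing the string itself.
			 */
			param->string = NULL;
			break;
		}
		return 0;
	}

The spec table would then be pointed to from file_system_type (the 'parameters'
field the document introduces), and the parse hook from the filesystem's fs_context
operations, so that the description can be validated at registration time when
CONFIG_VALIDATE_FS_PARSER=y.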
index d1ee484a787d1b476cf13bcf7d7b53ac084fb63e..ee9984f3586897c870bd42b854f5d883b245621e 100644 (file)
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -36,6 +36,7 @@ Supported adapters:
   * Intel Cannon Lake (PCH)
   * Intel Cedar Fork (PCH)
   * Intel Ice Lake (PCH)
+  * Intel Comet Lake (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
index 18c1415e7bfad8f6e6e9b03febaf47f83a0f9915..ace56204dd03b1de816a89e77ad1b0d05bdbbd03 100644 (file)
--- a/Documentation/networking/msg_zerocopy.rst
+++ b/Documentation/networking/msg_zerocopy.rst
@@ -50,7 +50,7 @@ the excellent reporting over at LWN.net or read the original code.
 
   patchset
     [PATCH net-next v4 0/9] socket sendmsg MSG_ZEROCOPY
-    http://lkml.kernel.org/r/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
+    https://lkml.kernel.org/netdev/20170803202945.70750-1-willemdebruijn.kernel@gmail.com
 
 
 Interface
index 0ac5fa77f50173c139376a3f2c271faff2e5e569..8c7a713cf657a769f011dfd45676473e2ee94e2e 100644 (file)
--- a/Documentation/networking/netdev-FAQ.rst
+++ b/Documentation/networking/netdev-FAQ.rst
@@ -131,6 +131,19 @@ it to the maintainer to figure out what is the most recent and current
 version that should be applied. If there is any doubt, the maintainer
 will reply and ask what should be done.
 
+Q: I made changes to only a few patches in a patch series. Should I resend only the changed ones?
+--------------------------------------------------------------------------------------------------------------
+A: No, please resend the entire patch series and make sure you number your
+patches such that it is clear this is the latest and greatest set of patches
+that can be applied.
+
+Q: I submitted multiple versions of a patch series and it looks like a version other than the last one has been accepted; what should I do?
+-------------------------------------------------------------------------------------------------------------------------------------------
+A: No revert is possible; once it is pushed out, it stays like that.
+Please send incremental versions on top of what has been merged in order to fix
+the patches so they end up the way they would have looked if your latest patch
+series had been merged.
+
 Q: How can I tell what patches are queued up for backporting to the various stable releases?
 --------------------------------------------------------------------------------------------
 A: Normally Greg Kroah-Hartman collects stable commits himself, but for
index 54128c50d508ef27e5c6f2026fc5dddd0df47ead..ca2136c76042c4ded1aa1608ea38f405e04772da 100644 (file)
@@ -44,10 +44,10 @@ including the Netfilter hooks and the flowtable fastpath bypass.
      /         \    /          \     |Routing |   /            \
   -->  ingress  ---> prerouting ---> |decision|   | postrouting |--> neigh_xmit
      \_________/    \__________/     ----------   \____________/          ^
-       |      ^          |               |               ^                |
-   flowtable  |          |          ____\/___            |                |
-       |      |          |         /         \           |                |
-    __\/___   |          --------->| forward |------------                |
+       |      ^                          |               ^                |
+   flowtable  |                     ____\/___            |                |
+       |      |                    /         \           |                |
+    __\/___   |                    | forward |------------                |
     |-----|   |                    \_________/                            |
     |-----|   |                 'flow offload' rule                       |
     |-----|   |                   adds entry to                           |
index 52b026be028f65f02aa8bf4a8816ab56f9e509ff..38a4edc4522b46f6ad3859f411eb46dfa4bc7f94 100644 (file)
@@ -413,7 +413,7 @@ algorithm.
 .. _F-RTO: https://tools.ietf.org/html/rfc5682
 
 TCP Fast Path
-============
+=============
 When the kernel receives a TCP packet, it has two paths to handle the
 packet: one is the fast path, the other is the slow path. The comment in the
 kernel code provides a good explanation of them; I have pasted it below::
@@ -681,6 +681,7 @@ The TCP stack receives an out of order duplicate packet, so it sends a
 DSACK to the sender.
 
 * TcpExtTCPDSACKRecv
+
 The TCP stack receives a DSACK, which indicates an acknowledged
 duplicate packet is received.
 
@@ -690,7 +691,7 @@ The TCP stack receives a DSACK, which indicate an out of order
 duplicate packet is received.
 
 invalid SACK and DSACK
-====================
+======================
 When a SACK (or DSACK) block is invalid, a corresponding counter would
 be updated. The validation method is based on the start/end sequence
 numbers of the SACK block. For more details, please refer to the comment
@@ -704,11 +705,13 @@ explaination:
 .. _Add counters for discarded SACK blocks: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=18f02545a9a16c9a89778b91a162ad16d510bb32
 
 * TcpExtTCPSACKDiscard
+
 This counter indicates how many SACK blocks are invalid. If the invalid
 SACK block is caused by ACK recording, the TCP stack will only ignore
 it and won't update this counter.
 
 * TcpExtTCPDSACKIgnoredOld and TcpExtTCPDSACKIgnoredNoUndo
+
 When a DSACK block is invalid, one of these two counters would be
 updated. Which counter will be updated depends on the undo_marker flag
 of the TCP socket. If the undo_marker is not set, the TCP stack isn't
@@ -719,7 +722,7 @@ will be updated. If the undo_marker is set, TcpExtTCPDSACKIgnoredOld
 will be updated. As implied in its name, it might be an old packet.
 
 SACK shift
-=========
+==========
 The Linux networking stack stores data in the sk_buff struct (skb for
 short). If a SACK block crosses multiple skbs, the TCP stack will try
 to re-arrange the data in these skbs. E.g. if a SACK block acknowledges seq
@@ -730,12 +733,15 @@ seq 14 to 20. All data in skb2 will be moved to skb1, and skb2 will be
 discarded; this operation is 'merge'.
 
 * TcpExtTCPSackShifted
+
 A skb is shifted
 
 * TcpExtTCPSackMerged
+
 A skb is merged
 
 * TcpExtTCPSackShiftFallback
+
 A skb should be shifted or merged, but the TCP stack doesn't do it for
 some reason.
 
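
A small user-space helper (an illustrative sketch, not part of this change)
for watching the TcpExt counters described above.  It assumes these counters
are exported through /proc/net/netstat, where the netstat extension counters
normally appear:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char line[4096];
                FILE *f = fopen("/proc/net/netstat", "r");

                if (!f) {
                        perror("/proc/net/netstat");
                        return 1;
                }
                /* Print only the "TcpExt:" header/value line pairs. */
                while (fgets(line, sizeof(line), f))
                        if (!strncmp(line, "TcpExt:", 7))
                                fputs(line, stdout);
                fclose(f);
                return 0;
        }
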
index f015ff786109cb80b8b9bb9a1227e6f5b9cb4678..43b36dbed48e77bffcc94ea79b8a2e6d79ad0aae 100644 (file)
@@ -2356,7 +2356,7 @@ F:        arch/arm/mm/cache-uniphier.c
 F:     arch/arm64/boot/dts/socionext/uniphier*
 F:     drivers/bus/uniphier-system-bus.c
 F:     drivers/clk/uniphier/
-F:     drivers/dmaengine/uniphier-mdmac.c
+F:     drivers/dma/uniphier-mdmac.c
 F:     drivers/gpio/gpio-uniphier.c
 F:     drivers/i2c/busses/i2c-uniphier*
 F:     drivers/irqchip/irq-uniphier-aidet.c
index c0a34064c5744e44ca696b3dd5bdb2ea809384e0..52f067eadc48d1fdb4021926e6811d4d371948a7 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -31,26 +31,12 @@ _all:
 # descending is started. They are now explicitly listed as the
 # prepare rule.
 
-# Ugly workaround for Debian make-kpkg:
-# make-kpkg directly includes the top Makefile of Linux kernel. In such a case,
-# skip sub-make to support debian_* targets in ruleset/kernel_version.mk, but
-# displays warning to discourage such abusage.
-ifneq ($(word 2, $(MAKEFILE_LIST)),)
-$(warning Do not include top Makefile of Linux Kernel)
-sub-make-done := 1
-MAKEFLAGS += -rR
-endif
-
-ifneq ($(sub-make-done),1)
+ifneq ($(sub_make_done),1)
 
 # Do not use make's built-in rules and variables
 # (this increases performance and avoids hard-to-debug behaviour)
 MAKEFLAGS += -rR
 
-# 'MAKEFLAGS += -rR' does not become immediately effective for old
-# GNU Make versions. Cancel implicit rules for this Makefile.
-$(lastword $(MAKEFILE_LIST)): ;
-
 # Avoid funny character set dependencies
 unexport LC_ALL
 LC_COLLATE=C
@@ -153,6 +139,7 @@ $(if $(KBUILD_OUTPUT),, \
 # 'sub-make' below.
 MAKEFLAGS += --include-dir=$(CURDIR)
 
+need-sub-make := 1
 else
 
 # Do not print "Entering directory ..." at all for in-tree build.
@@ -160,6 +147,18 @@ MAKEFLAGS += --no-print-directory
 
 endif # ifneq ($(KBUILD_OUTPUT),)
 
+ifneq ($(filter 3.%,$(MAKE_VERSION)),)
+# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
+# We need to invoke sub-make to avoid implicit rules in the top Makefile.
+need-sub-make := 1
+# Cancel implicit rules for this Makefile.
+$(lastword $(MAKEFILE_LIST)): ;
+endif
+
+export sub_make_done := 1
+
+ifeq ($(need-sub-make),1)
+
 PHONY += $(MAKECMDGOALS) sub-make
 
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
@@ -167,12 +166,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
 
 # Invoke a second make in the output directory, passing relevant variables
 sub-make:
-       $(Q)$(MAKE) sub-make-done=1 \
+       $(Q)$(MAKE) \
        $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
        -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
 
-else # sub-make-done
+endif # need-sub-make
+endif # sub_make_done
+
 # We process the rest of the Makefile if this is the final invocation of make
+ifeq ($(need-sub-make),)
 
 # Do not print "Entering directory ...",
 # but we want to display it when entering to the output directory
@@ -497,7 +499,8 @@ outputmakefile:
 ifneq ($(KBUILD_SRC),)
        $(Q)ln -fsn $(srctree) source
        $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile $(srctree)
-       $(Q){ echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
+       $(Q)test -e .gitignore || \
+       { echo "# this is build directory, ignore it"; echo "*"; } > .gitignore
 endif
 
 ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
@@ -677,7 +680,7 @@ KBUILD_CFLAGS       += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS  += $(call cc-disable-warning, int-in-bool-context)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
-KBUILD_CFLAGS  += $(call cc-option,-Oz,-Os)
+KBUILD_CFLAGS  += -Os
 else
 KBUILD_CFLAGS   += -O2
 endif
@@ -950,9 +953,11 @@ mod_sign_cmd = true
 endif
 export mod_sign_cmd
 
+HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 ifdef CONFIG_STACK_VALIDATION
   has_libelf := $(call try-run,\
-               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
+               echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
@@ -1757,7 +1762,7 @@ existing-targets := $(wildcard $(sort $(targets)))
 
 endif   # ifeq ($(config-targets),1)
 endif   # ifeq ($(mixed-targets),1)
-endif   # sub-make-done
+endif   # need-sub-make
 
 PHONY += FORCE
 FORCE:
index 054ead960f983a99a9f241ce1427fe0e1cd6cb8a..850b4805e2d171436e539b326867d6ce08a6f9d6 100644 (file)
@@ -596,6 +596,7 @@ config ARCH_DAVINCI
        select HAVE_IDE
        select PM_GENERIC_DOMAINS if PM
        select PM_GENERIC_DOMAINS_OF if PM && OF
+       select REGMAP_MMIO
        select RESET_CONTROLLER
        select SPARSE_IRQ
        select USE_OF
index 5641d162dfdb0c106eed6f7f4dc4f7c120930970..28e7513ce61713a084bc5f91f96cc2426d3f50a8 100644 (file)
@@ -93,7 +93,7 @@
 };
 
 &hdmi {
-       hpd-gpios = <&gpio 46 GPIO_ACTIVE_LOW>;
+       hpd-gpios = <&gpio 46 GPIO_ACTIVE_HIGH>;
 };
 
 &pwm {
index b715ab0fa1ffc09c24e101b4f506b9f9bb900550..e8d800fec63790925701a460afa8415c9706d8dc 100644 (file)
                        reg = <2>;
                };
 
-               switch@0 {
+               switch@10 {
                        compatible = "qca,qca8334";
-                       reg = <0>;
+                       reg = <10>;
 
                        switch_ports: ports {
                                #address-cells = <1>;
                                ethphy0: port@0 {
                                        reg = <0>;
                                        label = "cpu";
-                                       phy-mode = "rgmii";
+                                       phy-mode = "rgmii-id";
                                        ethernet = <&fec>;
 
                                        fixed-link {
index 1d1b4bd0670ffd094d2939ed9c91095d8ae8ba39..a4217f564a5347a568830e2032dd3fac2ae1c80f 100644 (file)
        pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
        vmcc-supply = <&reg_sd3_vmmc>;
        cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
-       bus-witdh = <4>;
+       bus-width = <4>;
        no-1-8-v;
        status = "okay";
 };
        pinctrl-1 = <&pinctrl_usdhc4_100mhz>;
        pinctrl-2 = <&pinctrl_usdhc4_200mhz>;
        vmcc-supply = <&reg_sd4_vmmc>;
-       bus-witdh = <8>;
+       bus-width = <8>;
        no-1-8-v;
        non-removable;
        status = "okay";
index 433bf09a1954c5ff05e1f3b3255c326fb69bf615..027df06c5dc7d60c9711ebef8b9333e2fe0c9a58 100644 (file)
@@ -91,6 +91,7 @@
        pinctrl-0 = <&pinctrl_enet>;
        phy-handle = <&ethphy>;
        phy-mode = "rgmii";
+       phy-reset-duration = <10>; /* in msecs */
        phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>;
        phy-supply = <&vdd_eth_io_reg>;
        status = "disabled";
index f6fb6783c1933154049768297372832f68586a04..54cfe72295aa47a278ee8d5ffae5c688b6d8b4fa 100644 (file)
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+/* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Copyright (C) 2016 Freescale Semiconductor, Inc.
  * Copyright (C) 2017 NXP
index 04066f9cb8a31c643cba82ea337dd22cb7a85626..f2f6558a00f188937ca55ee5f44e689da5e1bf7a 100644 (file)
                gpio-sck = <&gpio0 5 GPIO_ACTIVE_HIGH>;
                gpio-mosi = <&gpio0 4 GPIO_ACTIVE_HIGH>;
                /*
-                * It's not actually active high, but the frameworks assume
-                * the polarity of the passed-in GPIO is "normal" (active
-                * high) then actively drives the line low to select the
-                * chip.
+                * This chipselect is active high. Just setting the flags
+                * to GPIO_ACTIVE_HIGH is not enough for the SPI DT bindings,
+                * it will be ignored, only the special "spi-cs-high" flag
+                * really counts.
                 */
                cs-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
+               spi-cs-high;
                num-chipselects = <1>;
 
                /*
index 8661dd9b064a5cdfd4a8801a8b98e9c9f45d7dc0..b37f8e675e4081b200bfd9b9a97d565efce1d1f7 100644 (file)
@@ -170,6 +170,9 @@ CONFIG_IMX_SDMA=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_IIO=y
 CONFIG_FSL_MX25_ADC=y
+CONFIG_PWM=y
+CONFIG_PWM_IMX1=y
+CONFIG_PWM_IMX27=y
 CONFIG_EXT4_FS=y
 # CONFIG_DNOTIFY is not set
 CONFIG_VFAT_FS=y
index 5586a5074a96b6a84165e32f59ea2fa0800b484a..50fb01d70b1030ca6d2f721b30eaa8078894b589 100644 (file)
@@ -398,7 +398,7 @@ CONFIG_MAG3110=y
 CONFIG_MPL3115=y
 CONFIG_PWM=y
 CONFIG_PWM_FSL_FTM=y
-CONFIG_PWM_IMX=y
+CONFIG_PWM_IMX27=y
 CONFIG_NVMEM_IMX_OCOTP=y
 CONFIG_NVMEM_VF610_OCOTP=y
 CONFIG_TEE=y
index bfeb25aaf9a2a7a48857a3896fb682d7d94568a8..326e870d712394fad445033defd8e3ff5975ebdd 100644 (file)
 #include "cpuidle.h"
 #include "hardware.h"
 
-static atomic_t master = ATOMIC_INIT(0);
-static DEFINE_SPINLOCK(master_lock);
+static int num_idle_cpus = 0;
+static DEFINE_SPINLOCK(cpuidle_lock);
 
 static int imx6q_enter_wait(struct cpuidle_device *dev,
                            struct cpuidle_driver *drv, int index)
 {
-       if (atomic_inc_return(&master) == num_online_cpus()) {
-               /*
-                * With this lock, we prevent other cpu to exit and enter
-                * this function again and become the master.
-                */
-               if (!spin_trylock(&master_lock))
-                       goto idle;
+       spin_lock(&cpuidle_lock);
+       if (++num_idle_cpus == num_online_cpus())
                imx6_set_lpm(WAIT_UNCLOCKED);
-               cpu_do_idle();
-               imx6_set_lpm(WAIT_CLOCKED);
-               spin_unlock(&master_lock);
-               goto done;
-       }
+       spin_unlock(&cpuidle_lock);
 
-idle:
        cpu_do_idle();
-done:
-       atomic_dec(&master);
+
+       spin_lock(&cpuidle_lock);
+       if (num_idle_cpus-- == num_online_cpus())
+               imx6_set_lpm(WAIT_CLOCKED);
+       spin_unlock(&cpuidle_lock);
 
        return index;
 }
index c7169c2f94c4fd8cc018caa790c7b170e778eaf3..08c7892866c2df48732d15b9aa64329d0b009b75 100644 (file)
@@ -59,6 +59,7 @@ static void __init imx51_m4if_setup(void)
                return;
 
        m4if_base = of_iomap(np, 0);
+       of_node_put(np);
        if (!m4if_base) {
                pr_err("Unable to map M4IF registers\n");
                return;
index 70498a033cf57408ccdefe374c5fa8e1d22e785d..b5ca9c50876d9a23947dde5d7fe553104c9c0805 100644 (file)
@@ -27,6 +27,7 @@ config ARCH_BCM2835
        bool "Broadcom BCM2835 family"
        select TIMER_OF
        select GPIOLIB
+       select MFD_CORE
        select PINCTRL
        select PINCTRL_BCM2835
        select ARM_AMBA
index bb2045be8814036ddced1d4a7ec5b42951343832..97aeb946ed5e7473639ec94a498512d48a12ca8b 100644 (file)
                nvidia,default-trim = <0x9>;
                nvidia,dqs-trim = <63>;
                mmc-hs400-1_8v;
-               supports-cqe;
                status = "disabled";
        };
 
index 61a0afb74e6310b2b4c16bcf9939f6eab7db6258..1ea684af99c4a19b674f2ab90e38680584b09cf4 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the RZ/G2E (R8A774C0) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a774c0-cpg-mssr.h>
                                 <&cpg CPG_CORE R8A774C0_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A774C0_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index a69faa60ea4da4bb06a257af39881138a026c6d1..d2ad665fe2d925db040e50d2d9341b5535ddd167 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Device Tree Source for the R-Car E3 (R8A77990) SoC
  *
- * Copyright (C) 2018 Renesas Electronics Corp.
+ * Copyright (C) 2018-2019 Renesas Electronics Corp.
  */
 
 #include <dt-bindings/clock/r8a77990-cpg-mssr.h>
                                 <&cpg CPG_CORE R8A77990_CLK_S3D1C>,
                                 <&scif_clk>;
                        clock-names = "fck", "brg_int", "scif_clk";
-                       dmas = <&dmac1 0x5b>, <&dmac1 0x5a>,
-                              <&dmac2 0x5b>, <&dmac2 0x5a>;
-                       dma-names = "tx", "rx", "tx", "rx";
+                       dmas = <&dmac0 0x5b>, <&dmac0 0x5a>;
+                       dma-names = "tx", "rx";
                        power-domains = <&sysc R8A77990_PD_ALWAYS_ON>;
                        resets = <&cpg 202>;
                        status = "disabled";
index f8482fe5a190f47937ee188aa7ff3cbf67bcf2a2..413d566405d175ee882fc4f29a017a6fd39ce0b6 100644 (file)
@@ -217,7 +217,7 @@ static void __init request_standard_resources(void)
 
        num_standard_resources = memblock.memory.cnt;
        res_size = num_standard_resources * sizeof(*standard_resources);
-       standard_resources = memblock_alloc_low(res_size, SMP_CACHE_BYTES);
+       standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
        if (!standard_resources)
                panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);
 
index c5698a523bb189dee5650398603c4ac8a0f5bc27..23f7ed796f38829a054b5c0851b04e581990bcbf 100644 (file)
 /* Misc instructions for BPF compiler */
 #define PPC_INST_LBZ                   0x88000000
 #define PPC_INST_LD                    0xe8000000
+#define PPC_INST_LDX                   0x7c00002a
 #define PPC_INST_LHZ                   0xa0000000
 #define PPC_INST_LWZ                   0x80000000
 #define PPC_INST_LHBRX                 0x7c00062c
 #define PPC_INST_STB                   0x98000000
 #define PPC_INST_STH                   0xb0000000
 #define PPC_INST_STD                   0xf8000000
+#define PPC_INST_STDX                  0x7c00012a
 #define PPC_INST_STDU                  0xf8000001
 #define PPC_INST_STW                   0x90000000
 #define PPC_INST_STWU                  0x94000000
index 844d8e774492e65929168bfff4d0655fa50dda74..b7f6f6e0b6e801c6cf0fbb1d11d5c0d53014fb4d 100644 (file)
@@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
        beq     .Lzero
 
 .Lcmp_rest_lt8bytes:
-       /* Here we have only less than 8 bytes to compare with. at least s1
-        * Address is aligned with 8 bytes.
-        * The next double words are load and shift right with appropriate
-        * bits.
+       /*
+        * Here we have less than 8 bytes to compare. At least s1 is aligned to
+        * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
+        * page boundary, otherwise we might read past the end of the buffer and
+        * trigger a page fault. We use 4K as the conservative minimum page
+        * size. If we detect that case we go to the byte-by-byte loop.
+        *
+        * Otherwise the next double word is loaded from s1 and s2, and shifted
+        * right to compare the appropriate bits.
         */
+       clrldi  r6,r4,(64-12)   // r6 = r4 & 0xfff
+       cmpdi   r6,0xff8
+       bgt     .Lshort
+
        subfic  r6,r5,8
        slwi    r6,r6,3
        LD      rA,0,r3
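
The added guard corresponds roughly to the following C logic (an illustrative
sketch, not from the patch, assuming the conservative 4K page size the comment
mentions; r4 holds the s2 pointer):

        /* True when an 8-byte load starting at s2 could cross a 4K page
         * boundary: the last safe in-page offset for such a load is 0xff8,
         * since 0xff8 + 8 == 0x1000. */
        static inline bool load_may_cross_4k_page(const void *s2)
        {
                return ((unsigned long)s2 & 0xfff) > 0xff8;
        }
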
index 549e9490ff2aabd79e7e8a7acc970d8a599cba79..dcac37745b05cfcc70b89fafcda172e3406956a0 100644 (file)
@@ -51,6 +51,8 @@
 #define PPC_LIS(r, i)          PPC_ADDIS(r, 0, i)
 #define PPC_STD(r, base, i)    EMIT(PPC_INST_STD | ___PPC_RS(r) |            \
                                     ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_STDX(r, base, b)   EMIT(PPC_INST_STDX | ___PPC_RS(r) |           \
+                                    ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_STDU(r, base, i)   EMIT(PPC_INST_STDU | ___PPC_RS(r) |           \
                                     ___PPC_RA(base) | ((i) & 0xfffc))
 #define PPC_STW(r, base, i)    EMIT(PPC_INST_STW | ___PPC_RS(r) |            \
@@ -65,7 +67,9 @@
 #define PPC_LBZ(r, base, i)    EMIT(PPC_INST_LBZ | ___PPC_RT(r) |            \
                                     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LD(r, base, i)     EMIT(PPC_INST_LD | ___PPC_RT(r) |             \
-                                    ___PPC_RA(base) | IMM_L(i))
+                                    ___PPC_RA(base) | ((i) & 0xfffc))
+#define PPC_LDX(r, base, b)    EMIT(PPC_INST_LDX | ___PPC_RT(r) |            \
+                                    ___PPC_RA(base) | ___PPC_RB(b))
 #define PPC_LWZ(r, base, i)    EMIT(PPC_INST_LWZ | ___PPC_RT(r) |            \
                                     ___PPC_RA(base) | IMM_L(i))
 #define PPC_LHZ(r, base, i)    EMIT(PPC_INST_LHZ | ___PPC_RT(r) |            \
                                        ___PPC_RA(a) | ___PPC_RB(b))
 #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) |          \
                                        ___PPC_RA(a) | ___PPC_RB(b))
-
-#ifdef CONFIG_PPC64
-#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
-#else
-#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
-#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
-#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
-#endif
-
 #define PPC_CMPWI(a, i)                EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPDI(a, i)                EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
 #define PPC_CMPW(a, b)         EMIT(PPC_INST_CMPW | ___PPC_RA(a) |           \
index dc50a8d4b3b972a479aa2b00b1ea1c46db2977e2..21744d8aa053118f138f4a98d4097da2b2262fa6 100644 (file)
@@ -122,6 +122,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
 #define PPC_NTOHS_OFFS(r, base, i)     PPC_LHZ_OFFS(r, base, i)
 #endif
 
+#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
+#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
+
 #define SEEN_DATAREF 0x10000 /* might call external helpers */
 #define SEEN_XREG    0x20000 /* X reg is used */
 #define SEEN_MEM     0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
index 3609be4692b35e948f3ceff98c39c1b3bdaea744..47f441f351a6211c854ab3e7569a8ffa9e3dd943 100644 (file)
@@ -68,6 +68,26 @@ static const int b2p[] = {
 /* PPC NVR range -- update this if we ever use NVRs below r27 */
 #define BPF_PPC_NVR_MIN                27
 
+/*
+ * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
+ * so ensure that it isn't in use already.
+ */
+#define PPC_BPF_LL(r, base, i) do {                                          \
+                               if ((i) % 4) {                                \
+                                       PPC_LI(b2p[TMP_REG_2], (i));          \
+                                       PPC_LDX(r, base, b2p[TMP_REG_2]);     \
+                               } else                                        \
+                                       PPC_LD(r, base, i);                   \
+                               } while(0)
+#define PPC_BPF_STL(r, base, i) do {                                         \
+                               if ((i) % 4) {                                \
+                                       PPC_LI(b2p[TMP_REG_2], (i));          \
+                                       PPC_STDX(r, base, b2p[TMP_REG_2]);    \
+                               } else                                        \
+                                       PPC_STD(r, base, i);                  \
+                               } while(0)
+#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
+
 #define SEEN_FUNC      0x1000 /* might call external helpers */
 #define SEEN_STACK     0x2000 /* uses BPF stack */
 #define SEEN_TAILCALL  0x4000 /* uses tail calls */
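
An illustrative note on the macros above (not part of the patch): the ld/std
encodings used by PPC_LD/PPC_STD only accept displacements that are a multiple
of 4 (note the "& 0xfffc" masking), so for other offsets the value is first
materialised in TMP_REG_2 and the indexed form is used.  For example:

        PPC_BPF_LL(dst_reg, src_reg, 16); /* emits: ld  dst_reg, 16(src_reg)   */
        PPC_BPF_LL(dst_reg, src_reg, 30); /* emits: li  tmp2, 30               */
                                          /*        ldx dst_reg, src_reg, tmp2 */
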
index 4194d3cfb60cd0702487a83174f85b29806520ec..21a1dcd4b156c4bc926eccd5bd2375383a7705d2 100644 (file)
@@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *   goto out;
         */
-       PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
+       PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
        PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
        PPC_BCC(COND_GT, out);
 
@@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        /* prog = array->ptrs[index]; */
        PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
        PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
-       PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
+       PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
 
        /*
         * if (prog == NULL)
@@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
        PPC_BCC(COND_EQ, out);
 
        /* goto *(prog->bpf_func + prologue_size); */
-       PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
+       PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
 #ifdef PPC64_ELF_ABI_v1
        /* skip past the function descriptor */
        PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
@@ -606,7 +606,7 @@ bpf_alu32_trunc:
                                 * the instructions generated will remain the
                                 * same across all passes
                                 */
-                               PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
+                               PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
                                PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
                                PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
                                break;
@@ -662,7 +662,7 @@ emit_clear:
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                src_reg = b2p[TMP_REG_1];
                        }
-                       PPC_STD(src_reg, dst_reg, off);
+                       PPC_BPF_STL(src_reg, dst_reg, off);
                        break;
 
                /*
@@ -709,7 +709,7 @@ emit_clear:
                        break;
                /* dst = *(u64 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_DW:
-                       PPC_LD(dst_reg, src_reg, off);
+                       PPC_BPF_LL(dst_reg, src_reg, off);
                        break;
 
                /*
index 6ed22127391b6d0a7789bb363476452bf0991a65..921f12182f3e01a850372fd51b0a88a5bede296c 100644 (file)
@@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
 
                ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
        } else {
-               const __be32 *indexes;
-
-               indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
-               if (indexes == NULL)
-                       goto err_of_node_put;
+               u32 nr_drc_indexes, thread_drc_index;
 
                /*
-                * The first element indexes[0] is the number of drc_indexes
-                * returned in the list.  Hence thread_index+1 will get the
-                * drc_index corresponding to core number thread_index.
+                * The first element of ibm,drc-indexes array is the
+                * number of drc_indexes returned in the list.  Hence
+                * thread_index+1 will get the drc_index corresponding
+                * to core number thread_index.
                 */
-               ret = indexes[thread_index + 1];
+               rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+                                               0, &nr_drc_indexes);
+               if (rc)
+                       goto err_of_node_put;
+
+               WARN_ON_ONCE(thread_index > nr_drc_indexes);
+               rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
+                                               thread_index + 1,
+                                               &thread_drc_index);
+               if (rc)
+                       goto err_of_node_put;
+
+               ret = thread_drc_index;
        }
 
        rc = 0;
index d97d52772789b70187c5a2336c6fdee76fc48154..452dcfd7e5dd15083715eceef980642fdfb83efd 100644 (file)
@@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
                "UE",
                "SLB",
                "ERAT",
+               "Unknown",
                "TLB",
                "D-Cache",
                "Unknown",
index 1a6a7092d94209d4ee330003cfd3d2ccf713b916..e94a0a28b5ebe22b944ea73b1ac48bdcf52d9e63 100644 (file)
@@ -360,4 +360,15 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
        return reg1;
 }
 
+/*
+ * Interface to tell the AP bus code that a configuration
+ * change has happened. The bus code should at least do
+ * an ap bus resource rescan.
+ */
+#if IS_ENABLED(CONFIG_ZCRYPT)
+void ap_bus_cfg_chg(void);
+#else
+static inline void ap_bus_cfg_chg(void){};
+#endif
+
 #endif /* _ASM_S390_AP_H_ */
index 7d22a474a040ddd3d0e76c84075db6ab17bb2263..f74639a05f0ffc33f638c264af58c48933e36139 100644 (file)
@@ -252,11 +252,14 @@ do {                                                              \
 
 /*
  * Cache aliasing on the latest machines calls for a mapping granularity
- * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
- * of up to 1GB. For 31-bit processes the virtual address space is limited,
- * use no alignment and limit the randomization to 8MB.
+ * of 512KB for the anonymous mapping base. For 64-bit processes use a
+ * 512KB alignment and a randomization of up to 1GB. For 31-bit processes
+ * the virtual address space is limited, use no alignment and limit the
+ * randomization to 8MB.
+ * For the additional randomization of the program break use 32MB for
+ * 64-bit and 8MB for 31-bit.
  */
-#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x3ffffUL)
+#define BRK_RND_MASK   (is_compat_task() ? 0x7ffUL : 0x1fffUL)
 #define MMAP_RND_MASK  (is_compat_task() ? 0x7ffUL : 0x3ff80UL)
 #define MMAP_ALIGN_MASK        (is_compat_task() ? 0 : 0x7fUL)
 #define STACK_RND_MASK MMAP_RND_MASK
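
For reference, a back-of-the-envelope check of the figures in the comment
above (assuming the masks are applied to a page-granular, 4KB random value,
which those figures suggest): 0x1fffUL * 4KB is roughly 32MB and 0x7ffUL * 4KB
is 8MB for the brk randomization, while 0x3ff80UL * 4KB is roughly 1GB for
mmap, with the low 7 bits kept clear so the result stays 512KB-aligned.
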
index cc0947e08b6ffef09419a52eb04f817535016127..5b9f10b1e55dec03c2878a6ab510cb0d128002e5 100644 (file)
@@ -91,52 +91,53 @@ struct lowcore {
        __u64   hardirq_timer;                  /* 0x02e8 */
        __u64   softirq_timer;                  /* 0x02f0 */
        __u64   steal_timer;                    /* 0x02f8 */
-       __u64   last_update_timer;              /* 0x0300 */
-       __u64   last_update_clock;              /* 0x0308 */
-       __u64   int_clock;                      /* 0x0310 */
-       __u64   mcck_clock;                     /* 0x0318 */
-       __u64   clock_comparator;               /* 0x0320 */
-       __u64   boot_clock[2];                  /* 0x0328 */
+       __u64   avg_steal_timer;                /* 0x0300 */
+       __u64   last_update_timer;              /* 0x0308 */
+       __u64   last_update_clock;              /* 0x0310 */
+       __u64   int_clock;                      /* 0x0318*/
+       __u64   mcck_clock;                     /* 0x0320 */
+       __u64   clock_comparator;               /* 0x0328 */
+       __u64   boot_clock[2];                  /* 0x0330 */
 
        /* Current process. */
-       __u64   current_task;                   /* 0x0338 */
-       __u64   kernel_stack;                   /* 0x0340 */
+       __u64   current_task;                   /* 0x0340 */
+       __u64   kernel_stack;                   /* 0x0348 */
 
        /* Interrupt, DAT-off and restartstack. */
-       __u64   async_stack;                    /* 0x0348 */
-       __u64   nodat_stack;                    /* 0x0350 */
-       __u64   restart_stack;                  /* 0x0358 */
+       __u64   async_stack;                    /* 0x0350 */
+       __u64   nodat_stack;                    /* 0x0358 */
+       __u64   restart_stack;                  /* 0x0360 */
 
        /* Restart function and parameter. */
-       __u64   restart_fn;                     /* 0x0360 */
-       __u64   restart_data;                   /* 0x0368 */
-       __u64   restart_source;                 /* 0x0370 */
+       __u64   restart_fn;                     /* 0x0368 */
+       __u64   restart_data;                   /* 0x0370 */
+       __u64   restart_source;                 /* 0x0378 */
 
        /* Address space pointer. */
-       __u64   kernel_asce;                    /* 0x0378 */
-       __u64   user_asce;                      /* 0x0380 */
-       __u64   vdso_asce;                      /* 0x0388 */
+       __u64   kernel_asce;                    /* 0x0380 */
+       __u64   user_asce;                      /* 0x0388 */
+       __u64   vdso_asce;                      /* 0x0390 */
 
        /*
         * The lpp and current_pid fields form a
         * 64-bit value that is set as program
         * parameter with the LPP instruction.
         */
-       __u32   lpp;                            /* 0x0390 */
-       __u32   current_pid;                    /* 0x0394 */
+       __u32   lpp;                            /* 0x0398 */
+       __u32   current_pid;                    /* 0x039c */
 
        /* SMP info area */
-       __u32   cpu_nr;                         /* 0x0398 */
-       __u32   softirq_pending;                /* 0x039c */
-       __u32   preempt_count;                  /* 0x03a0 */
-       __u32   spinlock_lockval;               /* 0x03a4 */
-       __u32   spinlock_index;                 /* 0x03a8 */
-       __u32   fpu_flags;                      /* 0x03ac */
-       __u64   percpu_offset;                  /* 0x03b0 */
-       __u64   vdso_per_cpu_data;              /* 0x03b8 */
-       __u64   machine_flags;                  /* 0x03c0 */
-       __u64   gmap;                           /* 0x03c8 */
-       __u8    pad_0x03d0[0x0400-0x03d0];      /* 0x03d0 */
+       __u32   cpu_nr;                         /* 0x03a0 */
+       __u32   softirq_pending;                /* 0x03a4 */
+       __u32   preempt_count;                  /* 0x03a8 */
+       __u32   spinlock_lockval;               /* 0x03ac */
+       __u32   spinlock_index;                 /* 0x03b0 */
+       __u32   fpu_flags;                      /* 0x03b4 */
+       __u64   percpu_offset;                  /* 0x03b8 */
+       __u64   vdso_per_cpu_data;              /* 0x03c0 */
+       __u64   machine_flags;                  /* 0x03c8 */
+       __u64   gmap;                           /* 0x03d0 */
+       __u8    pad_0x03d8[0x0400-0x03d8];      /* 0x03d8 */
 
        /* br %r1 trampoline */
        __u16   br_r1_trampoline;               /* 0x0400 */
index c6fad208c2fa5a8ffaad40d554c7597097d3e4fa..b6854812d2ed56f11cbd03865c16b26290518611 100644 (file)
@@ -196,23 +196,30 @@ static void cf_diag_perf_event_destroy(struct perf_event *event)
  */
 static int __hw_perf_event_init(struct perf_event *event)
 {
-       struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
        struct perf_event_attr *attr = &event->attr;
+       struct cpu_cf_events *cpuhw;
        enum cpumf_ctr_set i;
        int err = 0;
 
-       debug_sprintf_event(cf_diag_dbg, 5,
-                           "%s event %p cpu %d authorized %#x\n", __func__,
-                           event, event->cpu, cpuhw->info.auth_ctl);
+       debug_sprintf_event(cf_diag_dbg, 5, "%s event %p cpu %d\n", __func__,
+                           event, event->cpu);
 
        event->hw.config = attr->config;
        event->hw.config_base = 0;
-       local64_set(&event->count, 0);
 
-       /* Add all authorized counter sets to config_base */
+       /* Add all authorized counter sets to config_base. The
+        * hardware init function is either called per-cpu or just once
+        * for all CPUs (event->cpu == -1).  This depends on whether
+        * counting is started for all CPUs or on a per-workload basis,
+        * where the perf event moves from one CPU to another CPU.
+        * Checking the authorization on any CPU is fine as the hardware
+        * applies the same authorization settings to all CPUs.
+        */
+       cpuhw = &get_cpu_var(cpu_cf_events);
        for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
                if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
                        event->hw.config_base |= cpumf_ctr_ctl[i];
+       put_cpu_var(cpu_cf_events);
 
        /* No authorized counter sets, nothing to count/sample */
        if (!event->hw.config_base) {
index 3fe1c77c361b98a9a4443bf1a2941f486d024030..bd197baf1dc337f018af35eeb19635b1c95998b7 100644 (file)
@@ -266,7 +266,8 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
        lc->percpu_offset = __per_cpu_offset[cpu];
        lc->kernel_asce = S390_lowcore.kernel_asce;
        lc->machine_flags = S390_lowcore.machine_flags;
-       lc->user_timer = lc->system_timer = lc->steal_timer = 0;
+       lc->user_timer = lc->system_timer =
+               lc->steal_timer = lc->avg_steal_timer = 0;
        __ctl_store(lc->cregs_save_area, 0, 15);
        save_access_regs((unsigned int *) lc->access_regs_save_area);
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
index 98f850e00008e99a1e64e8f20a74bbaaf4910636..a69a0911ed0e82720b10b124d0153681f2c821ea 100644 (file)
@@ -124,7 +124,7 @@ static void account_system_index_scaled(struct task_struct *p, u64 cputime,
  */
 static int do_account_vtime(struct task_struct *tsk)
 {
-       u64 timer, clock, user, guest, system, hardirq, softirq, steal;
+       u64 timer, clock, user, guest, system, hardirq, softirq;
 
        timer = S390_lowcore.last_update_timer;
        clock = S390_lowcore.last_update_clock;
@@ -182,12 +182,6 @@ static int do_account_vtime(struct task_struct *tsk)
        if (softirq)
                account_system_index_scaled(tsk, softirq, CPUTIME_SOFTIRQ);
 
-       steal = S390_lowcore.steal_timer;
-       if ((s64) steal > 0) {
-               S390_lowcore.steal_timer = 0;
-               account_steal_time(cputime_to_nsecs(steal));
-       }
-
        return virt_timer_forward(user + guest + system + hardirq + softirq);
 }
 
@@ -213,8 +207,19 @@ void vtime_task_switch(struct task_struct *prev)
  */
 void vtime_flush(struct task_struct *tsk)
 {
+       u64 steal, avg_steal;
+
        if (do_account_vtime(tsk))
                virt_timer_expire();
+
+       steal = S390_lowcore.steal_timer;
+       avg_steal = S390_lowcore.avg_steal_timer / 2;
+       if ((s64) steal > 0) {
+               S390_lowcore.steal_timer = 0;
+               account_steal_time(steal);
+               avg_steal += steal;
+       }
+       S390_lowcore.avg_steal_timer = avg_steal;
 }
 
 /*
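
A note on the new bookkeeping in vtime_flush() above (an observation, not text
from the patch): each flush halves the previous value and, when positive steal
time was seen, adds it in, i.e. avg_steal_timer(n) = avg_steal_timer(n-1)/2 +
steal(n).  The field therefore holds an exponentially decaying sum
steal(n) + steal(n-1)/2 + steal(n-2)/4 + ..., which settles at roughly twice
the per-interval steal time under a constant steal load.
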
index 6e0f2d97fc6d8f0a5b14e6dbea23f817706bef7a..d95f9489201526081abf8b8bdcc65a525cf4c179 100644 (file)
@@ -220,7 +220,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
                blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
                flush_rq->tag = -1;
        } else {
-               blk_mq_put_driver_tag_hctx(hctx, flush_rq);
+               blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = -1;
        }
 
@@ -324,7 +324,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
 
        if (q->elevator) {
                WARN_ON(rq->tag < 0);
-               blk_mq_put_driver_tag_hctx(hctx, rq);
+               blk_mq_put_driver_tag(rq);
        }
 
        /*
index 70b210a308c452b43abd1a270e759f5e331ab55f..3ff3d7b4996973458fa44a89133ed4ec5b65b2d4 100644 (file)
@@ -59,7 +59,8 @@ static int blk_mq_poll_stats_bkt(const struct request *rq)
 }
 
 /*
- * Check if any of the ctx's have pending work in this hardware queue
+ * Check if any of the ctx, dispatch list or elevator
+ * have pending work in this hardware queue.
  */
 static bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx)
 {
@@ -1071,7 +1072,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
        hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
        spin_lock(&hctx->dispatch_wait_lock);
-       list_del_init(&wait->entry);
+       if (!list_empty(&wait->entry)) {
+               struct sbitmap_queue *sbq;
+
+               list_del_init(&wait->entry);
+               sbq = &hctx->tags->bitmap_tags;
+               atomic_dec(&sbq->ws_active);
+       }
        spin_unlock(&hctx->dispatch_wait_lock);
 
        blk_mq_run_hw_queue(hctx, true);
@@ -1087,6 +1094,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                                 struct request *rq)
 {
+       struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
        struct wait_queue_head *wq;
        wait_queue_entry_t *wait;
        bool ret;
@@ -1109,7 +1117,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
        if (!list_empty_careful(&wait->entry))
                return false;
 
-       wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
+       wq = &bt_wait_ptr(sbq, hctx)->wait;
 
        spin_lock_irq(&wq->lock);
        spin_lock(&hctx->dispatch_wait_lock);
@@ -1119,6 +1127,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
                return false;
        }
 
+       atomic_inc(&sbq->ws_active);
        wait->flags &= ~WQ_FLAG_EXCLUSIVE;
        __add_wait_queue(wq, wait);
 
@@ -1139,6 +1148,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
         * someone else gets the wakeup.
         */
        list_del_init(&wait->entry);
+       atomic_dec(&sbq->ws_active);
        spin_unlock(&hctx->dispatch_wait_lock);
        spin_unlock_irq(&wq->lock);
 
index 0ed8e5a8729fccd39d5da0e58854bdb5c5de42da..d704fc7766f45458fd7f186a0111c859e09baafc 100644 (file)
@@ -224,15 +224,6 @@ static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
        }
 }
 
-static inline void blk_mq_put_driver_tag_hctx(struct blk_mq_hw_ctx *hctx,
-                                      struct request *rq)
-{
-       if (rq->tag == -1 || rq->internal_tag == -1)
-               return;
-
-       __blk_mq_put_driver_tag(hctx, rq);
-}
-
 static inline void blk_mq_put_driver_tag(struct request *rq)
 {
        if (rq->tag == -1 || rq->internal_tag == -1)
index 6ecbbabf12330c316d3e28cdbef52e72548b6ef3..eec263c9019e4bd9d200cf6903857f620da962fb 100644 (file)
@@ -1043,9 +1043,6 @@ void __init acpi_early_init(void)
 
        acpi_permanent_mmap = true;
 
-       /* Initialize debug output. Linux does not use ACPICA defaults */
-       acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR;
-
 #ifdef CONFIG_X86
        /*
         * If the machine falls into the DMI check table,
index 1b207fca1420bbc5c40469e1b3558cb38a57c450..d4244e7d0e38f05cef4e5b97df55d3a9946837c1 100644 (file)
@@ -1150,8 +1150,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
        cpc_read(cpunum, nominal_reg, &nom);
        perf_caps->nominal_perf = nom;
 
-       cpc_read(cpunum, guaranteed_reg, &guaranteed);
-       perf_caps->guaranteed_perf = guaranteed;
+       if (guaranteed_reg->type != ACPI_TYPE_BUFFER  ||
+           IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
+               perf_caps->guaranteed_perf = 0;
+       } else {
+               cpc_read(cpunum, guaranteed_reg, &guaranteed);
+               perf_caps->guaranteed_perf = guaranteed;
+       }
 
        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
        perf_caps->lowest_nonlinear_perf = min_nonlinear;
index 8685882da64cdaf60dcbac09d9c61735905b5300..4b9c7ca492e6db85dad979a67c7baed7cedd972d 100644 (file)
@@ -2057,7 +2057,8 @@ static size_t binder_get_object(struct binder_proc *proc,
        size_t object_size = 0;
 
        read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
-       if (read_size < sizeof(*hdr) || !IS_ALIGNED(offset, sizeof(u32)))
+       if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
+           !IS_ALIGNED(offset, sizeof(u32)))
                return 0;
        binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
                                      offset, read_size);
index 6389467670a0bc171522a2035ae4788bb700d616..195f120c4e8c9aefa9f6e57e8ce400a8ddde95fb 100644 (file)
@@ -927,14 +927,13 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        index = page - alloc->pages;
        page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
+
+       mm = alloc->vma_vm_mm;
+       if (!mmget_not_zero(mm))
+               goto err_mmget;
+       if (!down_write_trylock(&mm->mmap_sem))
+               goto err_down_write_mmap_sem_failed;
        vma = binder_alloc_get_vma(alloc);
-       if (vma) {
-               if (!mmget_not_zero(alloc->vma_vm_mm))
-                       goto err_mmget;
-               mm = alloc->vma_vm_mm;
-               if (!down_read_trylock(&mm->mmap_sem))
-                       goto err_down_write_mmap_sem_failed;
-       }
 
        list_lru_isolate(lru, item);
        spin_unlock(lock);
@@ -945,10 +944,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
                zap_page_range(vma, page_addr, PAGE_SIZE);
 
                trace_binder_unmap_user_end(alloc, index);
-
-               up_read(&mm->mmap_sem);
-               mmput(mm);
        }
+       up_write(&mm->mmap_sem);
+       mmput(mm);
 
        trace_binder_unmap_kernel_start(alloc, index);
 
index b3ed8f9953a862ea3ae67ef065ca5469330a44e0..173e6f2dd9af0f12afdc1fee7e372cfa4291e0aa 100644 (file)
@@ -52,38 +52,52 @@ static int eject_tray(struct ata_device *dev)
 /* Per the spec, only slot type and drawer type ODD can be supported */
 static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
 {
-       char buf[16];
+       char *buf;
        unsigned int ret;
-       struct rm_feature_desc *desc = (void *)(buf + 8);
+       struct rm_feature_desc *desc;
        struct ata_taskfile tf;
        static const char cdb[] = {  GPCMD_GET_CONFIGURATION,
                        2,      /* only 1 feature descriptor requested */
                        0, 3,   /* 3, removable medium feature */
                        0, 0, 0,/* reserved */
-                       0, sizeof(buf),
+                       0, 16,
                        0, 0, 0,
        };
 
+       buf = kzalloc(16, GFP_KERNEL);
+       if (!buf)
+               return ODD_MECH_TYPE_UNSUPPORTED;
+       desc = (void *)(buf + 8);
+
        ata_tf_init(dev, &tf);
        tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf.command = ATA_CMD_PACKET;
        tf.protocol = ATAPI_PROT_PIO;
-       tf.lbam = sizeof(buf);
+       tf.lbam = 16;
 
        ret = ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
-                               buf, sizeof(buf), 0);
-       if (ret)
+                               buf, 16, 0);
+       if (ret) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (be16_to_cpu(desc->feature_code) != 3)
+       if (be16_to_cpu(desc->feature_code) != 3) {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 
-       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1)
+       if (desc->mech_type == 0 && desc->load == 0 && desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_SLOT;
-       else if (desc->mech_type == 1 && desc->load == 0 && desc->eject == 1)
+       } else if (desc->mech_type == 1 && desc->load == 0 &&
+                  desc->eject == 1) {
+               kfree(buf);
                return ODD_MECH_TYPE_DRAWER;
-       else
+       } else {
+               kfree(buf);
                return ODD_MECH_TYPE_UNSUPPORTED;
+       }
 }
 
 /* Test if ODD is zero power ready by sense code */
index e7a5f1d1c3141acf9ef79c79fc5829886ef73245..399cad7daae77b37508033ec1cac61bebefbc550 100644 (file)
@@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
        struct zram *zram = dev_to_zram(dev);
        unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
        int index;
-       char mode_buf[8];
-       ssize_t sz;
 
-       sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-       if (sz <= 0)
-               return -EINVAL;
-
-       /* ignore trailing new line */
-       if (mode_buf[sz - 1] == '\n')
-               mode_buf[sz - 1] = 0x00;
-
-       if (strcmp(mode_buf, "all"))
+       if (!sysfs_streq(buf, "all"))
                return -EINVAL;
 
        down_read(&zram->init_lock);
@@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
        struct bio bio;
        struct bio_vec bio_vec;
        struct page *page;
-       ssize_t ret, sz;
-       char mode_buf[8];
-       int mode = -1;
+       ssize_t ret;
+       int mode;
        unsigned long blk_idx = 0;
 
-       sz = strscpy(mode_buf, buf, sizeof(mode_buf));
-       if (sz <= 0)
-               return -EINVAL;
-
-       /* ignore trailing newline */
-       if (mode_buf[sz - 1] == '\n')
-               mode_buf[sz - 1] = 0x00;
-
-       if (!strcmp(mode_buf, "idle"))
+       if (sysfs_streq(buf, "idle"))
                mode = IDLE_WRITEBACK;
-       else if (!strcmp(mode_buf, "huge"))
+       else if (sysfs_streq(buf, "huge"))
                mode = HUGE_WRITEBACK;
-
-       if (mode == -1)
+       else
                return -EINVAL;
 
        down_read(&zram->init_lock);
index e22f0dbaebb1d97e407d0be2b67da9b25f8af8dc..b599c7318aab4ea9302779b8527ece4a91151914 100644 (file)
@@ -385,7 +385,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
        if (ret)
                return ret;
 
-       return cppc_perf.guaranteed_perf;
+       if (cppc_perf.guaranteed_perf)
+               return cppc_perf.guaranteed_perf;
+
+       return cppc_perf.nominal_perf;
 }
 
 #else /* CONFIG_ACPI_CPPC_LIB */
index 3f49427766b8810cb361ce2d8a1b8331541da5c0..2b51e0718c9f6e493b8659a2ad3dd24de8eaab0d 100644 (file)
@@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
 
        clk_put(priv->clk);
        dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
-       kfree(priv);
        dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
+       kfree(priv);
 
        return 0;
 }
index 4e0eede599a8dedd8c306a65c12366a5c857a4ae..ac0301b695937c1168cac2055a5af41d47536379 100644 (file)
@@ -1578,11 +1578,9 @@ static int stm32_mdma_probe(struct platform_device *pdev)
 
        dmadev->nr_channels = nr_channels;
        dmadev->nr_requests = nr_requests;
-       ret = device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
+       device_property_read_u32_array(&pdev->dev, "st,ahb-addr-masks",
                                       dmadev->ahb_addr_masks,
                                       count);
-       if (ret)
-               return ret;
        dmadev->nr_ahb_addr_masks = count;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
index 91b90c0cea731778bd524a13d801433e7df0a2ab..12acdac858208979438491e90c3782b064f9e952 100644 (file)
@@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
        if (err < 0)
                goto out;
 
-       if (err & BIT(pos))
-               err = -EACCES;
+       if (value & BIT(pos)) {
+               err = -EPERM;
+               goto out;
+       }
 
        err = 0;
 
index 854bce4fb9e7209b2697e9c662e65e4456c969c3..217507002dbc38ce7a34c64df751ac5887d304d6 100644 (file)
@@ -1224,6 +1224,8 @@ static int __init aspeed_gpio_probe(struct platform_device *pdev)
 
        gpio->offset_timer =
                devm_kzalloc(&pdev->dev, gpio->chip.ngpio, GFP_KERNEL);
+       if (!gpio->offset_timer)
+               return -ENOMEM;
 
        return aspeed_gpio_setup_irqs(gpio, pdev);
 }
index 0ecd2369c2cad0daa5e08696ab85b91af5235a26..a09d2f9ebacc8d4909d79119333e344453ea6e0a 100644 (file)
@@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
        mutex_init(&exar_gpio->lock);
 
        index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
+       if (index < 0)
+               goto err_destroy;
 
        sprintf(exar_gpio->name, "exar_gpio%d", index);
        exar_gpio->gpio_chip.label = exar_gpio->name;
index 154d959e899323dcea54b2829c6a70a4127bf579..b6a4efce7c9285f0a26411246d615c90f498d0be 100644 (file)
@@ -204,8 +204,8 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
        struct gpio_mockup_chip *chip;
        struct seq_file *sfile;
        struct gpio_chip *gc;
+       int val, cnt;
        char buf[3];
-       int val, rv;
 
        if (*ppos != 0)
                return 0;
@@ -216,13 +216,9 @@ static ssize_t gpio_mockup_debugfs_read(struct file *file,
        gc = &chip->gc;
 
        val = gpio_mockup_get(gc, priv->offset);
-       snprintf(buf, sizeof(buf), "%d\n", val);
+       cnt = snprintf(buf, sizeof(buf), "%d\n", val);
 
-       rv = copy_to_user(usr_buf, buf, sizeof(buf));
-       if (rv)
-               return rv;
-
-       return sizeof(buf) - 1;
+       return simple_read_from_buffer(usr_buf, size, ppos, buf, cnt);
 }
 
 static ssize_t gpio_mockup_debugfs_write(struct file *file,
index 8b9c3ab70f6eade458a501184ce764454819043d..6a3ec575a404ed9fa3dfcf8c78e56381789a8c02 100644 (file)
@@ -120,7 +120,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
         * to determine if the flags should have inverted semantics.
         */
        if (IS_ENABLED(CONFIG_SPI_MASTER) &&
-           of_property_read_bool(np, "cs-gpios")) {
+           of_property_read_bool(np, "cs-gpios") &&
+           !strcmp(propname, "cs-gpios")) {
                struct device_node *child;
                u32 cs;
                int ret;
@@ -142,16 +143,16 @@ static void of_gpio_flags_quirks(struct device_node *np,
                                 * conflict and the "spi-cs-high" flag will
                                 * take precedence.
                                 */
-                               if (of_property_read_bool(np, "spi-cs-high")) {
+                               if (of_property_read_bool(child, "spi-cs-high")) {
                                        if (*flags & OF_GPIO_ACTIVE_LOW) {
                                                pr_warn("%s GPIO handle specifies active low - ignored\n",
-                                                       of_node_full_name(np));
+                                                       of_node_full_name(child));
                                                *flags &= ~OF_GPIO_ACTIVE_LOW;
                                        }
                                } else {
                                        if (!(*flags & OF_GPIO_ACTIVE_LOW))
                                                pr_info("%s enforce active low on chipselect handle\n",
-                                                       of_node_full_name(np));
+                                                       of_node_full_name(child));
                                        *flags |= OF_GPIO_ACTIVE_LOW;
                                }
                                break;
@@ -717,7 +718,13 @@ int of_gpiochip_add(struct gpio_chip *chip)
 
        of_node_get(chip->of_node);
 
-       return of_gpiochip_scan_gpios(chip);
+       status = of_gpiochip_scan_gpios(chip);
+       if (status) {
+               of_node_put(chip->of_node);
+               gpiochip_remove_pin_ranges(chip);
+       }
+
+       return status;
 }
 
 void of_gpiochip_remove(struct gpio_chip *chip)
index 144af07335815998c7238b72e01882cf26692ef8..0495bf1d480a4cfe464e8ff330922264d03deff7 100644 (file)
@@ -2776,7 +2776,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
        }
 
        config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
-       return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+       return chip->set_config(chip, gpio_chip_hwgpio(desc), config);
 }
 EXPORT_SYMBOL_GPL(gpiod_set_debounce);
 
@@ -2813,7 +2813,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
        packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
                                          !transitory);
        gpio = gpio_chip_hwgpio(desc);
-       rc = gpio_set_config(chip, gpio, packed);
+       rc = chip->set_config(chip, gpio, packed);
        if (rc == -ENOTSUPP) {
                dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
                                gpio);
index fb27783d7a542d565e1c002d03fc051d055039be..81127f7d6ed193c9cb996b685577a807f5e5646e 100644 (file)
@@ -5429,9 +5429,11 @@ static void get_freesync_config_for_crtc(
        struct amdgpu_dm_connector *aconnector =
                        to_amdgpu_dm_connector(new_con_state->base.connector);
        struct drm_display_mode *mode = &new_crtc_state->base.mode;
+       int vrefresh = drm_mode_vrefresh(mode);
 
        new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
-               aconnector->min_vfreq <= drm_mode_vrefresh(mode);
+                                       vrefresh >= aconnector->min_vfreq &&
+                                       vrefresh <= aconnector->max_vfreq;
 
        if (new_crtc_state->vrr_supported) {
                new_crtc_state->stream->ignore_msa_timing_param = true;
index 381581b01d485e581df8bcebcd6983b01bc8a488..05bbc2b622fc1094a2a8f85ce060d0805eae0f7e 100644 (file)
@@ -376,11 +376,7 @@ void drm_dev_unplug(struct drm_device *dev)
        synchronize_srcu(&drm_unplug_srcu);
 
        drm_dev_unregister(dev);
-
-       mutex_lock(&drm_global_mutex);
-       if (dev->open_count == 0)
-               drm_dev_put(dev);
-       mutex_unlock(&drm_global_mutex);
+       drm_dev_put(dev);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
 
index 0e9349ff2d16a64dd6628ab47de8f9ab0271d632..af2ab640cadbb05105325a0de2b31ae5f5c70ccf 100644 (file)
@@ -1963,7 +1963,7 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
                                best_depth = fmt->depth;
                }
        }
-       if (sizes.surface_depth != best_depth) {
+       if (sizes.surface_depth != best_depth && best_depth) {
                DRM_INFO("requested bpp %d, scaled depth down to %d",
                         sizes.surface_bpp, best_depth);
                sizes.surface_depth = best_depth;
index 83a5bbca6e7e089f10d75ea723ac982b7df61356..7caa3c7ed9789901e4aa5df2c2204326cfe39c27 100644 (file)
@@ -489,11 +489,9 @@ int drm_release(struct inode *inode, struct file *filp)
 
        drm_close_helper(filp);
 
-       if (!--dev->open_count) {
+       if (!--dev->open_count)
                drm_lastclose(dev);
-               if (drm_dev_is_unplugged(dev))
-                       drm_put_dev(dev);
-       }
+
        mutex_unlock(&drm_global_mutex);
 
        drm_minor_release(minor);
index 35b4ec3f7618b887e5661d0d652cca99b6ed02c6..3592d04c33b283cac0abd2f432ce313194d2b606 100644 (file)
@@ -1441,7 +1441,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
        }
 
        if (index_mode) {
-               if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
+               if (guest_gma >= I915_GTT_PAGE_SIZE) {
                        ret = -EFAULT;
                        goto err;
                }
index c7103dd2d8d571fde462f173dcc67efc0973cc69..d7052ab7908c8d9c7872df64cb5cd68ec8f13b4e 100644 (file)
@@ -1882,7 +1882,11 @@ struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
        }
 
        list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
+
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
        list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
+
        return mm;
 }
 
@@ -1967,9 +1971,10 @@ int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
                if (ret)
                        return ret;
 
+               mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
                list_move_tail(&mm->ppgtt_mm.lru_list,
                               &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
-
+               mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
        }
 
        return 0;
@@ -1980,6 +1985,8 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
        struct intel_vgpu_mm *mm;
        struct list_head *pos, *n;
 
+       mutex_lock(&gvt->gtt.ppgtt_mm_lock);
+
        list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
 
@@ -1987,9 +1994,11 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
                        continue;
 
                list_del_init(&mm->ppgtt_mm.lru_list);
+               mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
                invalidate_ppgtt_mm(mm);
                return 1;
        }
+       mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2659,6 +2668,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
                }
        }
        INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
+       mutex_init(&gvt->gtt.ppgtt_mm_lock);
        return 0;
 }
 
@@ -2699,7 +2709,9 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
        list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
                mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
                if (mm->type == INTEL_GVT_MM_PPGTT) {
+                       mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        list_del_init(&mm->ppgtt_mm.lru_list);
+                       mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
                        if (mm->ppgtt_mm.shadowed)
                                invalidate_ppgtt_mm(mm);
                }
index d8cb04cc946dff3e19466ff387089db96c226d53..edb610dc5d8689e49f22310b310133b9cb3ee921 100644 (file)
@@ -88,6 +88,7 @@ struct intel_gvt_gtt {
        void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
        struct list_head oos_page_use_list_head;
        struct list_head oos_page_free_list_head;
+       struct mutex ppgtt_mm_lock;
        struct list_head ppgtt_mm_lru_list_head;
 
        struct page *scratch_page;
index 7d84cfb9051ac886579648ac7bb2cc5e2a70b3fa..7902fb162d09441f9b4f65447f5e6619b8792c01 100644 (file)
@@ -132,6 +132,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 
        {RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
        {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
+       {RCS, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */
 
        {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
        {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
index 1bb8f936fdaa75f2ee738bdf3235a247fac90fe8..159192c097cc7eb7424070e8cec052f3f5e5b1f7 100644 (file)
@@ -346,7 +346,7 @@ static int set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
        int i = 0;
 
        if (mm->type != INTEL_GVT_MM_PPGTT || !mm->ppgtt_mm.shadowed)
-               return -1;
+               return -EINVAL;
 
        if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
                px_dma(&ppgtt->pml4) = mm->ppgtt_mm.shadow_pdps[0];
@@ -410,12 +410,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        if (workload->shadow)
                return 0;
 
-       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
-       if (ret < 0) {
-               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
-               return ret;
-       }
-
        /* pin shadow context by gvt even the shadow context will be pinned
         * when i915 alloc request. That is because gvt will update the guest
         * context from shadow context when workload is completed, and at that
@@ -678,6 +672,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
        struct intel_vgpu *vgpu = workload->vgpu;
        struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+       struct intel_vgpu_submission *s = &vgpu->submission;
+       struct i915_gem_context *shadow_ctx = s->shadow_ctx;
+       struct i915_request *rq;
        int ring_id = workload->ring_id;
        int ret;
 
@@ -687,6 +684,12 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
        mutex_lock(&vgpu->vgpu_lock);
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       ret = set_context_ppgtt_from_shadow(workload, shadow_ctx);
+       if (ret < 0) {
+               gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
+               goto err_req;
+       }
+
        ret = intel_gvt_workload_req_alloc(workload);
        if (ret)
                goto err_req;
@@ -703,6 +706,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 
        ret = prepare_workload(workload);
 out:
+       if (ret) {
+               /* We might still need to add request with
+                * clean ctx to retire it properly..
+                */
+               rq = fetch_and_zero(&workload->req);
+               i915_request_put(rq);
+       }
+
        if (!IS_ERR_OR_NULL(workload->req)) {
                gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
                                ring_id, workload->req);
@@ -739,7 +750,8 @@ static struct intel_vgpu_workload *pick_next_workload(
                goto out;
        }
 
-       if (list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+       if (!scheduler->current_vgpu->active ||
+           list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
                goto out;
 
        /*
index 9adc7bb9e69ccfec96e468f95435b83e084ffcce..a67a63b5aa84a09d675793dc118fce8829315917 100644 (file)
@@ -2346,7 +2346,8 @@ static inline unsigned int i915_sg_segment_size(void)
                                 INTEL_DEVID(dev_priv) == 0x5915 || \
                                 INTEL_DEVID(dev_priv) == 0x591E)
 #define IS_AML_ULX(dev_priv)   (INTEL_DEVID(dev_priv) == 0x591C || \
-                                INTEL_DEVID(dev_priv) == 0x87C0)
+                                INTEL_DEVID(dev_priv) == 0x87C0 || \
+                                INTEL_DEVID(dev_priv) == 0x87CA)
 #define IS_SKL_GT2(dev_priv)   (IS_SKYLAKE(dev_priv) && \
                                 INTEL_INFO(dev_priv)->gt == 2)
 #define IS_SKL_GT3(dev_priv)   (IS_SKYLAKE(dev_priv) && \
index 638a586469f97be9fb83bbbcb152c518e7d46e1e..047855dd8c6b828ce42f926680f7d8466883d3cc 100644 (file)
@@ -2863,7 +2863,7 @@ enum i915_power_well_id {
 #define GEN11_GT_VEBOX_VDBOX_DISABLE   _MMIO(0x9140)
 #define   GEN11_GT_VDBOX_DISABLE_MASK  0xff
 #define   GEN11_GT_VEBOX_DISABLE_SHIFT 16
-#define   GEN11_GT_VEBOX_DISABLE_MASK  (0xff << GEN11_GT_VEBOX_DISABLE_SHIFT)
+#define   GEN11_GT_VEBOX_DISABLE_MASK  (0x0f << GEN11_GT_VEBOX_DISABLE_SHIFT)
 
 #define GEN11_EU_DISABLE _MMIO(0x9134)
 #define GEN11_EU_DIS_MASK 0xFF
@@ -9243,7 +9243,7 @@ enum skl_power_gate {
 #define TRANS_DDI_FUNC_CTL2(tran)      _MMIO_TRANS2(tran, \
                                                     _TRANS_DDI_FUNC_CTL2_A)
 #define  PORT_SYNC_MODE_ENABLE                 (1 << 4)
-#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) < 0)
+#define  PORT_SYNC_MODE_MASTER_SELECT(x)       ((x) << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_MASK     (0x7 << 0)
 #define  PORT_SYNC_MODE_MASTER_SELECT_SHIFT    0
 
index 32dce7176f6381dc2a0429691dccc2eafc7fe360..b9b0ea4e2404d6cfce2c37be5d331591fb88fe6e 100644 (file)
@@ -455,7 +455,7 @@ static int igt_evict_contexts(void *arg)
                        struct i915_gem_context *ctx;
 
                        ctx = live_context(i915, file);
-                       if (!ctx)
+                       if (IS_ERR(ctx))
                                break;
 
                        /* We will need some GGTT space for the rq's context */
index 2281ed3eb7747757620288069f32d48a53b9ea15..8a4ebcb6405cee2427d0889ea49a0d871d2cc5ba 100644 (file)
@@ -337,12 +337,14 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 
        ret = drm_dev_register(drm, 0);
        if (ret)
-               goto free_drm;
+               goto uninstall_irq;
 
        drm_fbdev_generic_setup(drm, 32);
 
        return 0;
 
+uninstall_irq:
+       drm_irq_uninstall(drm);
 free_drm:
        drm_dev_put(drm);
 
@@ -356,8 +358,8 @@ static int meson_drv_bind(struct device *dev)
 
 static void meson_drv_unbind(struct device *dev)
 {
-       struct drm_device *drm = dev_get_drvdata(dev);
-       struct meson_drm *priv = drm->dev_private;
+       struct meson_drm *priv = dev_get_drvdata(dev);
+       struct drm_device *drm = priv->drm;
 
        if (priv->canvas) {
                meson_canvas_free(priv->canvas, priv->canvas_id_osd1);
@@ -367,6 +369,7 @@ static void meson_drv_unbind(struct device *dev)
        }
 
        drm_dev_unregister(drm);
+       drm_irq_uninstall(drm);
        drm_kms_helper_poll_fini(drm);
        drm_mode_config_cleanup(drm);
        drm_dev_put(drm);
index e28814f4ea6cd2e05724ee46a0892b261d3d4cef..563953ec6ad03fd904c2e5c38de8cbe1dc2edce0 100644 (file)
@@ -569,7 +569,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
        DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
 
        /* If sink max TMDS clock, we reject the mode */
-       if (mode->clock > connector->display_info.max_tmds_clock)
+       if (connector->display_info.max_tmds_clock &&
+           mode->clock > connector->display_info.max_tmds_clock)
                return MODE_BAD;
 
        /* Check against non-VIC supported modes */
index c7d4c6073ea59b70c56559288def3fb7fd6fe215..0d4ade9d4722c340b706b82d7ea7bb587db5f293 100644 (file)
@@ -541,6 +541,18 @@ static void vop_core_clks_disable(struct vop *vop)
        clk_disable(vop->hclk);
 }
 
+static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
+{
+       if (win->phy->scl && win->phy->scl->ext) {
+               VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
+               VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
+       }
+
+       VOP_WIN_SET(vop, win, enable, 0);
+}
+
 static int vop_enable(struct drm_crtc *crtc)
 {
        struct vop *vop = to_vop(crtc);
@@ -586,7 +598,7 @@ static int vop_enable(struct drm_crtc *crtc)
                struct vop_win *vop_win = &vop->win[i];
                const struct vop_win_data *win = vop_win->data;
 
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
        }
        spin_unlock(&vop->reg_lock);
 
@@ -735,7 +747,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
 
        spin_lock(&vop->reg_lock);
 
-       VOP_WIN_SET(vop, win, enable, 0);
+       vop_win_disable(vop, win);
 
        spin_unlock(&vop->reg_lock);
 }
@@ -1622,7 +1634,7 @@ static int vop_initial(struct vop *vop)
                int channel = i * 2 + 1;
 
                VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
-               VOP_WIN_SET(vop, win, enable, 0);
+               vop_win_disable(vop, win);
                VOP_WIN_SET(vop, win, gate, 1);
        }
 
index ba9b3cfb8c3d247fae80f8026cc520936e5b954c..b3436c2aed6892b585ca221a9ac711027350310e 100644 (file)
@@ -378,14 +378,16 @@ static int tegra_shared_plane_atomic_check(struct drm_plane *plane,
 static void tegra_shared_plane_atomic_disable(struct drm_plane *plane,
                                              struct drm_plane_state *old_state)
 {
-       struct tegra_dc *dc = to_tegra_dc(old_state->crtc);
        struct tegra_plane *p = to_tegra_plane(plane);
+       struct tegra_dc *dc;
        u32 value;
 
        /* rien ne va plus */
        if (!old_state || !old_state->crtc)
                return;
 
+       dc = to_tegra_dc(old_state->crtc);
+
        /*
         * XXX Legacy helpers seem to sometimes call ->atomic_disable() even
         * on planes that are already disabled. Make sure we fallback to the
index 39bfed9623de28f0e62a0297f8e84b7151c28238..982ce37ecde1b0c9fc6ef07c9819b98541248151 100644 (file)
@@ -106,6 +106,7 @@ static int vic_boot(struct vic *vic)
        if (vic->booted)
                return 0;
 
+#ifdef CONFIG_IOMMU_API
        if (vic->config->supports_sid) {
                struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev);
                u32 value;
@@ -121,6 +122,7 @@ static int vic_boot(struct vic *vic)
                        vic_writel(vic, value, VIC_THI_STREAMID1);
                }
        }
+#endif
 
        /* setup clockgating registers */
        vic_writel(vic, CG_IDLE_CG_DLY_CNT(4) |
index 66885c24590f0147ce1a510991a546c4f2bbe427..c1bd5e3d9e4aee80bb185cc38307fb389fe54c2f 100644 (file)
 #include "udl_connector.h"
 #include "udl_drv.h"
 
-static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
-                                                          u8 *buff)
+static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
+                              size_t len)
 {
        int ret, i;
        u8 *read_buff;
+       struct udl_device *udl = data;
 
        read_buff = kmalloc(2, GFP_KERNEL);
        if (!read_buff)
-               return false;
+               return -1;
 
-       for (i = 0; i < EDID_LENGTH; i++) {
-               int bval = (i + block_idx * EDID_LENGTH) << 8;
+       for (i = 0; i < len; i++) {
+               int bval = (i + block * EDID_LENGTH) << 8;
                ret = usb_control_msg(udl->udev,
                                      usb_rcvctrlpipe(udl->udev, 0),
                                          (0x02), (0x80 | (0x02 << 5)), bval,
@@ -37,60 +38,13 @@ static bool udl_get_edid_block(struct udl_device *udl, int block_idx,
                if (ret < 1) {
                        DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
                        kfree(read_buff);
-                       return false;
+                       return -1;
                }
-               buff[i] = read_buff[1];
+               buf[i] = read_buff[1];
        }
 
        kfree(read_buff);
-       return true;
-}
-
-static bool udl_get_edid(struct udl_device *udl, u8 **result_buff,
-                        int *result_buff_size)
-{
-       int i, extensions;
-       u8 *block_buff = NULL, *buff_ptr;
-
-       block_buff = kmalloc(EDID_LENGTH, GFP_KERNEL);
-       if (block_buff == NULL)
-               return false;
-
-       if (udl_get_edid_block(udl, 0, block_buff) &&
-           memchr_inv(block_buff, 0, EDID_LENGTH)) {
-               extensions = ((struct edid *)block_buff)->extensions;
-               if (extensions > 0) {
-                       /* we have to read all extensions one by one */
-                       *result_buff_size = EDID_LENGTH * (extensions + 1);
-                       *result_buff = kmalloc(*result_buff_size, GFP_KERNEL);
-                       buff_ptr = *result_buff;
-                       if (buff_ptr == NULL) {
-                               kfree(block_buff);
-                               return false;
-                       }
-                       memcpy(buff_ptr, block_buff, EDID_LENGTH);
-                       kfree(block_buff);
-                       buff_ptr += EDID_LENGTH;
-                       for (i = 1; i < extensions; ++i) {
-                               if (udl_get_edid_block(udl, i, buff_ptr)) {
-                                       buff_ptr += EDID_LENGTH;
-                               } else {
-                                       kfree(*result_buff);
-                                       *result_buff = NULL;
-                                       return false;
-                               }
-                       }
-                       return true;
-               }
-               /* we have only base edid block */
-               *result_buff = block_buff;
-               *result_buff_size = EDID_LENGTH;
-               return true;
-       }
-
-       kfree(block_buff);
-
-       return false;
+       return 0;
 }
 
 static int udl_get_modes(struct drm_connector *connector)
@@ -122,8 +76,6 @@ static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
 static enum drm_connector_status
 udl_detect(struct drm_connector *connector, bool force)
 {
-       u8 *edid_buff = NULL;
-       int edid_buff_size = 0;
        struct udl_device *udl = connector->dev->dev_private;
        struct udl_drm_connector *udl_connector =
                                        container_of(connector,
@@ -136,12 +88,10 @@ udl_detect(struct drm_connector *connector, bool force)
                udl_connector->edid = NULL;
        }
 
-
-       if (!udl_get_edid(udl, &edid_buff, &edid_buff_size))
+       udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
+       if (!udl_connector->edid)
                return connector_status_disconnected;
 
-       udl_connector->edid = (struct edid *)edid_buff;
-       
        return connector_status_connected;
 }
 
index 5930facd6d2d85cca81cb9c1f5247a6be3632546..11a8f99ba18c5f007734abef1003cc44d5e778a1 100644 (file)
@@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
        ret = drm_gem_handle_create(file, &obj->base, handle);
        drm_gem_object_put_unlocked(&obj->base);
        if (ret)
-               goto err;
+               return ERR_PTR(ret);
 
        return &obj->base;
-
-err:
-       __vgem_gem_destroy(obj);
-       return ERR_PTR(ret);
 }
 
 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
index 138b0bb325cf9662cd59b5a54158947dc691a2d9..69048e73377dc97855aa3b71491008e5993a5304 100644 (file)
@@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
 
        ret = drm_gem_handle_create(file, &obj->gem, handle);
        drm_gem_object_put_unlocked(&obj->gem);
-       if (ret) {
-               drm_gem_object_release(&obj->gem);
-               kfree(obj);
+       if (ret)
                return ERR_PTR(ret);
-       }
 
        return &obj->gem;
 }
index f2c6819712013046246002346af928bd1ab16bc0..f8979abb9a19ca963bf9625fc911ab74590b388a 100644 (file)
@@ -131,6 +131,7 @@ config I2C_I801
            Cannon Lake (PCH)
            Cedar Fork (PCH)
            Ice Lake (PCH)
+           Comet Lake (PCH)
 
          This driver can also be built as a module.  If so, the module
          will be called i2c-i801.
index c91e145ef5a56dbb1a512c23611f06ad7d22aa05..679c6c41f64b49babf8b0a7505d56a2c4093f6c7 100644 (file)
@@ -71,6 +71,7 @@
  * Cannon Lake-LP (PCH)                0x9da3  32      hard    yes     yes     yes
  * Cedar Fork (PCH)            0x18df  32      hard    yes     yes     yes
  * Ice Lake-LP (PCH)           0x34a3  32      hard    yes     yes     yes
+ * Comet Lake (PCH)            0x02a3  32      hard    yes     yes     yes
  *
  * Features supported by this driver:
  * Software PEC                                no
 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS       0xa223
 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS       0xa2a3
 #define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS         0xa323
+#define PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS            0x02a3
 
 struct i801_mux_config {
        char *gpio_chip;
@@ -1038,6 +1040,7 @@ static const struct pci_device_id i801_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
        { 0, }
 };
 
@@ -1534,6 +1537,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
        case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
        case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
        case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
+       case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
                priv->features |= FEATURE_I2C_BLOCK_READ;
                priv->features |= FEATURE_IRQ;
                priv->features |= FEATURE_SMBUS_PEC;
index 21cb088d66877a4bd6c8c914e72eff5c385de031..f7cdd2ab7f11f6cba22003d4cf71576b4bc77b72 100644 (file)
@@ -3169,21 +3169,24 @@ static void amd_iommu_get_resv_regions(struct device *dev,
                return;
 
        list_for_each_entry(entry, &amd_iommu_unity_map, list) {
+               int type, prot = 0;
                size_t length;
-               int prot = 0;
 
                if (devid < entry->devid_start || devid > entry->devid_end)
                        continue;
 
+               type   = IOMMU_RESV_DIRECT;
                length = entry->address_end - entry->address_start;
                if (entry->prot & IOMMU_PROT_IR)
                        prot |= IOMMU_READ;
                if (entry->prot & IOMMU_PROT_IW)
                        prot |= IOMMU_WRITE;
+               if (entry->prot & IOMMU_UNITY_MAP_FLAG_EXCL_RANGE)
+                       /* Exclusion range */
+                       type = IOMMU_RESV_RESERVED;
 
                region = iommu_alloc_resv_region(entry->address_start,
-                                                length, prot,
-                                                IOMMU_RESV_DIRECT);
+                                                length, prot, type);
                if (!region) {
                        dev_err(dev, "Out of memory allocating dm-regions\n");
                        return;
index f773792d77fd533be53ec3796692c140f77121d7..1b1378619fc9ec2f0caa0bbbd262192c21de61e4 100644 (file)
@@ -2013,6 +2013,9 @@ static int __init init_unity_map_range(struct ivmd_header *m)
        if (e == NULL)
                return -ENOMEM;
 
+       if (m->flags & IVMD_FLAG_EXCL_RANGE)
+               init_exclusion_range(m);
+
        switch (m->type) {
        default:
                kfree(e);
@@ -2059,9 +2062,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
 
        while (p < end) {
                m = (struct ivmd_header *)p;
-               if (m->flags & IVMD_FLAG_EXCL_RANGE)
-                       init_exclusion_range(m);
-               else if (m->flags & IVMD_FLAG_UNITY_MAP)
+               if (m->flags & (IVMD_FLAG_UNITY_MAP | IVMD_FLAG_EXCL_RANGE))
                        init_unity_map_range(m);
 
                p += m->length;
index eae0741f72dce2fcea771e415a986b062515c7fc..87965e4d964771bd2352d6254bba299f43734107 100644 (file)
 #define IOMMU_PROT_IR 0x01
 #define IOMMU_PROT_IW 0x02
 
+#define IOMMU_UNITY_MAP_FLAG_EXCL_RANGE        (1 << 2)
+
 /* IOMMU capabilities */
 #define IOMMU_CAP_IOTLB   24
 #define IOMMU_CAP_NPCACHE 26
index f101afc315abb8da199fa1a9d2bd4df3d44d82e9..9a8a8870e26727e7398afffd5286860b0e8581d9 100644 (file)
 
 #define ARM_V7S_TCR_PD1                        BIT(5)
 
+#ifdef CONFIG_ZONE_DMA32
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
+#else
+#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
+#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
+#endif
+
 typedef u32 arm_v7s_iopte;
 
 static bool selftest_running;
@@ -197,13 +205,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
        void *table = NULL;
 
        if (lvl == 1)
-               table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
+               table = (void *)__get_free_pages(
+                       __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
        else if (lvl == 2)
-               table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
+               table = kmem_cache_zalloc(data->l2_tables, gfp);
        phys = virt_to_phys(table);
-       if (phys != (arm_v7s_iopte)phys)
+       if (phys != (arm_v7s_iopte)phys) {
                /* Doesn't fit in PTE */
+               dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
                goto out_free;
+       }
        if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
                dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, dma))
@@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
        data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
                                            ARM_V7S_TABLE_SIZE(2),
                                            ARM_V7S_TABLE_SIZE(2),
-                                           SLAB_CACHE_DMA, NULL);
+                                           ARM_V7S_TABLE_SLAB_FLAGS, NULL);
        if (!data->l2_tables)
                goto out_free_data;
 
index 33a982e33716369b7d2bf5885ef5a8ed22f04414..109de67d5d727c227d3970b2879edd60d6478357 100644 (file)
@@ -1105,10 +1105,12 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 
                dom = __iommu_domain_alloc(dev->bus, iommu_def_domain_type);
                if (!dom && iommu_def_domain_type != IOMMU_DOMAIN_DMA) {
-                       dev_warn(dev,
-                                "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
-                                iommu_def_domain_type);
                        dom = __iommu_domain_alloc(dev->bus, IOMMU_DOMAIN_DMA);
+                       if (dom) {
+                               dev_warn(dev,
+                                        "failed to allocate default IOMMU domain of type %u; falling back to IOMMU_DOMAIN_DMA",
+                                        iommu_def_domain_type);
+                       }
                }
 
                group->default_domain = dom;
index 4d85645c87f78721a83fcef94be1feb3bce8c094..0928fd1f0e0c134943c7dab3aeec7d0e5699eaf5 100644 (file)
@@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
        if (m->clock2)
                test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
 
-       if (ent->device == 0xB410) {
+       if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
+           ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
                test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
                test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
                test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
index 7fea18b0c15d115178c874163232fa6c2e3706ce..7cb4d685a1f107f335c7cf475d06aae6688ab6bc 100644 (file)
@@ -513,6 +513,7 @@ static int pca9532_probe(struct i2c_client *client,
        const struct i2c_device_id *id)
 {
        int devid;
+       const struct of_device_id *of_id;
        struct pca9532_data *data = i2c_get_clientdata(client);
        struct pca9532_platform_data *pca9532_pdata =
                        dev_get_platdata(&client->dev);
@@ -528,8 +529,11 @@ static int pca9532_probe(struct i2c_client *client,
                        dev_err(&client->dev, "no platform data\n");
                        return -EINVAL;
                }
-               devid = (int)(uintptr_t)of_match_device(
-                       of_pca9532_leds_match, &client->dev)->data;
+               of_id = of_match_device(of_pca9532_leds_match,
+                               &client->dev);
+               if (unlikely(!of_id))
+                       return -EINVAL;
+               devid = (int)(uintptr_t) of_id->data;
        } else {
                devid = id->driver_data;
        }
index 3dd3ed46d473b673fd916085044bd3c3b38b0759..136f86a1627d18cf396990ca1a4122d17578d0af 100644 (file)
@@ -122,7 +122,8 @@ static ssize_t device_name_store(struct device *dev,
                trigger_data->net_dev = NULL;
        }
 
-       strncpy(trigger_data->device_name, buf, size);
+       memcpy(trigger_data->device_name, buf, size);
+       trigger_data->device_name[size] = 0;
        if (size > 0 && trigger_data->device_name[size - 1] == '\n')
                trigger_data->device_name[size - 1] = 0;
 
@@ -301,11 +302,11 @@ static int netdev_trig_notify(struct notifier_block *nb,
                container_of(nb, struct led_netdev_data, notifier);
 
        if (evt != NETDEV_UP && evt != NETDEV_DOWN && evt != NETDEV_CHANGE
-           && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER
-           && evt != NETDEV_CHANGENAME)
+           && evt != NETDEV_REGISTER && evt != NETDEV_UNREGISTER)
                return NOTIFY_DONE;
 
-       if (strcmp(dev->name, trigger_data->device_name))
+       if (!(dev == trigger_data->net_dev ||
+             (evt == NETDEV_REGISTER && !strcmp(dev->name, trigger_data->device_name))))
                return NOTIFY_DONE;
 
        cancel_delayed_work_sync(&trigger_data->work);
@@ -320,12 +321,9 @@ static int netdev_trig_notify(struct notifier_block *nb,
                dev_hold(dev);
                trigger_data->net_dev = dev;
                break;
-       case NETDEV_CHANGENAME:
        case NETDEV_UNREGISTER:
-               if (trigger_data->net_dev) {
-                       dev_put(trigger_data->net_dev);
-                       trigger_data->net_dev = NULL;
-               }
+               dev_put(trigger_data->net_dev);
+               trigger_data->net_dev = NULL;
                break;
        case NETDEV_UP:
        case NETDEV_CHANGE:
index 3525236ed8d9d702e25fac066926ba1933fe4edc..19c84214a7ea8890543ea8341033ed1ceb89df12 100644 (file)
@@ -179,6 +179,12 @@ static void cs_do_release(struct kref *ref)
 
        /* We also need to update CI for internal queues */
        if (cs->submitted) {
+               int cs_cnt = atomic_dec_return(&hdev->cs_active_cnt);
+
+               WARN_ONCE((cs_cnt < 0),
+                       "hl%d: error in CS active cnt %d\n",
+                       hdev->id, cs_cnt);
+
                hl_int_hw_queue_update_ci(cs);
 
                spin_lock(&hdev->hw_queues_mirror_lock);
index a53c12aff6ad9cebd9be4a2b031b9a93ea6c72b3..974a87789bd8689d1530daa8890bac3b3b32d38c 100644 (file)
@@ -232,6 +232,7 @@ static int vm_show(struct seq_file *s, void *data)
        struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
        enum vm_type_t *vm_type;
        bool once = true;
+       u64 j;
        int i;
 
        if (!dev_entry->hdev->mmu_enable)
@@ -260,7 +261,7 @@ static int vm_show(struct seq_file *s, void *data)
                        } else {
                                phys_pg_pack = hnode->ptr;
                                seq_printf(s,
-                                       "    0x%-14llx      %-10u       %-4u\n",
+                                       "    0x%-14llx      %-10llu       %-4u\n",
                                        hnode->vaddr, phys_pg_pack->total_size,
                                        phys_pg_pack->handle);
                        }
@@ -282,9 +283,9 @@ static int vm_show(struct seq_file *s, void *data)
                                                phys_pg_pack->page_size);
                        seq_puts(s, "   physical address\n");
                        seq_puts(s, "---------------------\n");
-                       for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+                       for (j = 0 ; j < phys_pg_pack->npages ; j++) {
                                seq_printf(s, "    0x%-14llx\n",
-                                               phys_pg_pack->pages[i]);
+                                               phys_pg_pack->pages[j]);
                        }
                }
                spin_unlock(&vm->idr_lock);
index de46aa6ed1542438c5d5952ff77c9cc17dadc5a6..77d51be66c7e84045558fff78eea0a8e9a70439e 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/sched/signal.h>
 #include <linux/hwmon.h>
 
+#define HL_PLDM_PENDING_RESET_PER_SEC  (HL_PENDING_RESET_PER_SEC * 10)
+
 bool hl_device_disabled_or_in_reset(struct hl_device *hdev)
 {
        if ((hdev->disabled) || (atomic_read(&hdev->in_reset)))
@@ -216,6 +218,7 @@ static int device_early_init(struct hl_device *hdev)
        spin_lock_init(&hdev->hw_queues_mirror_lock);
        atomic_set(&hdev->in_reset, 0);
        atomic_set(&hdev->fd_open_cnt, 0);
+       atomic_set(&hdev->cs_active_cnt, 0);
 
        return 0;
 
@@ -413,6 +416,27 @@ int hl_device_suspend(struct hl_device *hdev)
 
        pci_save_state(hdev->pdev);
 
+       /* Block future CS/VM/JOB completion operations */
+       rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+       if (rc) {
+               dev_err(hdev->dev, "Can't suspend while in reset\n");
+               return -EIO;
+       }
+
+       /* This blocks all other stuff that is not blocked by in_reset */
+       hdev->disabled = true;
+
+       /*
+        * Flush anyone that is inside the critical section of enqueue
+        * jobs to the H/W
+        */
+       hdev->asic_funcs->hw_queues_lock(hdev);
+       hdev->asic_funcs->hw_queues_unlock(hdev);
+
+       /* Flush processes that are sending message to CPU */
+       mutex_lock(&hdev->send_cpu_message_lock);
+       mutex_unlock(&hdev->send_cpu_message_lock);
+
        rc = hdev->asic_funcs->suspend(hdev);
        if (rc)
                dev_err(hdev->dev,
@@ -440,21 +464,38 @@ int hl_device_resume(struct hl_device *hdev)
 
        pci_set_power_state(hdev->pdev, PCI_D0);
        pci_restore_state(hdev->pdev);
-       rc = pci_enable_device(hdev->pdev);
+       rc = pci_enable_device_mem(hdev->pdev);
        if (rc) {
                dev_err(hdev->dev,
                        "Failed to enable PCI device in resume\n");
                return rc;
        }
 
+       pci_set_master(hdev->pdev);
+
        rc = hdev->asic_funcs->resume(hdev);
        if (rc) {
-               dev_err(hdev->dev,
-                       "Failed to enable PCI access from device CPU\n");
-               return rc;
+               dev_err(hdev->dev, "Failed to resume device after suspend\n");
+               goto disable_device;
+       }
+
+
+       hdev->disabled = false;
+       atomic_set(&hdev->in_reset, 0);
+
+       rc = hl_device_reset(hdev, true, false);
+       if (rc) {
+               dev_err(hdev->dev, "Failed to reset device during resume\n");
+               goto disable_device;
        }
 
        return 0;
+
+disable_device:
+       pci_clear_master(hdev->pdev);
+       pci_disable_device(hdev->pdev);
+
+       return rc;
 }
 
 static void hl_device_hard_reset_pending(struct work_struct *work)
@@ -462,9 +503,16 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
        struct hl_device_reset_work *device_reset_work =
                container_of(work, struct hl_device_reset_work, reset_work);
        struct hl_device *hdev = device_reset_work->hdev;
-       u16 pending_cnt = HL_PENDING_RESET_PER_SEC;
+       u16 pending_total, pending_cnt;
        struct task_struct *task = NULL;
 
+       if (hdev->pldm)
+               pending_total = HL_PLDM_PENDING_RESET_PER_SEC;
+       else
+               pending_total = HL_PENDING_RESET_PER_SEC;
+
+       pending_cnt = pending_total;
+
        /* Flush all processes that are inside hl_open */
        mutex_lock(&hdev->fd_open_cnt_lock);
 
@@ -489,6 +537,19 @@ static void hl_device_hard_reset_pending(struct work_struct *work)
                }
        }
 
+       pending_cnt = pending_total;
+
+       while ((atomic_read(&hdev->fd_open_cnt)) && (pending_cnt)) {
+
+               pending_cnt--;
+
+               ssleep(1);
+       }
+
+       if (atomic_read(&hdev->fd_open_cnt))
+               dev_crit(hdev->dev,
+                       "Going to hard reset with open user contexts\n");
+
        mutex_unlock(&hdev->fd_open_cnt_lock);
 
        hl_device_reset(hdev, true, true);
index 238dd57c541bdf1e632f8ff008f69bafc3e5e59a..ea979ebd62fb8c5f30d08b052a0e481325470ece 100644 (file)
@@ -1201,15 +1201,6 @@ static int goya_stop_external_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_external_queues(struct hl_device *hdev)
-{
-       WREG32(mmDMA_QM_0_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_1_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_2_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_3_GLBL_CFG1, 0);
-       WREG32(mmDMA_QM_4_GLBL_CFG1, 0);
-}
-
 /*
  * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU
  *
@@ -2178,36 +2169,6 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
        return retval;
 }
 
-static void goya_resume_internal_queues(struct hl_device *hdev)
-{
-       WREG32(mmMME_QM_GLBL_CFG1, 0);
-       WREG32(mmMME_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC0_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC1_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC2_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC3_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC4_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC5_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC6_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0);
-
-       WREG32(mmTPC7_QM_GLBL_CFG1, 0);
-       WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0);
-}
-
 static void goya_dma_stall(struct hl_device *hdev)
 {
        WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -2905,20 +2866,6 @@ int goya_suspend(struct hl_device *hdev)
 {
        int rc;
 
-       rc = goya_stop_internal_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop internal queues\n");
-               return rc;
-       }
-
-       rc = goya_stop_external_queues(hdev);
-
-       if (rc) {
-               dev_err(hdev->dev, "failed to stop external queues\n");
-               return rc;
-       }
-
        rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS);
        if (rc)
                dev_err(hdev->dev, "Failed to disable PCI access from CPU\n");
@@ -2928,15 +2875,7 @@ int goya_suspend(struct hl_device *hdev)
 
 int goya_resume(struct hl_device *hdev)
 {
-       int rc;
-
-       goya_resume_external_queues(hdev);
-       goya_resume_internal_queues(hdev);
-
-       rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS);
-       if (rc)
-               dev_err(hdev->dev, "Failed to enable PCI access from CPU\n");
-       return rc;
+       return goya_init_iatu(hdev);
 }
 
 static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma,
@@ -3070,7 +3009,7 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id,
 
        *dma_handle = hdev->asic_prop.sram_base_address;
 
-       base = hdev->pcie_bar[SRAM_CFG_BAR_ID];
+       base = (void *) hdev->pcie_bar[SRAM_CFG_BAR_ID];
 
        switch (queue_id) {
        case GOYA_QUEUE_ID_MME:
index a7c95e9f9b9a8808efa70651e66c34625ac82d0a..a8ee52c880cd800651681b866048126b2e9fc478 100644 (file)
@@ -793,11 +793,11 @@ struct hl_vm_hash_node {
  * struct hl_vm_phys_pg_pack - physical page pack.
  * @vm_type: describes the type of the virtual area descriptor.
  * @pages: the physical page array.
+ * @npages: num physical pages in the pack.
+ * @total_size: total size of all the pages in this list.
  * @mapping_cnt: number of shared mappings.
  * @asid: the context related to this list.
- * @npages: num physical pages in the pack.
  * @page_size: size of each page in the pack.
- * @total_size: total size of all the pages in this list.
  * @flags: HL_MEM_* flags related to this list.
  * @handle: the provided handle related to this list.
  * @offset: offset from the first page.
@@ -807,11 +807,11 @@ struct hl_vm_hash_node {
 struct hl_vm_phys_pg_pack {
        enum vm_type_t          vm_type; /* must be first */
        u64                     *pages;
+       u64                     npages;
+       u64                     total_size;
        atomic_t                mapping_cnt;
        u32                     asid;
-       u32                     npages;
        u32                     page_size;
-       u32                     total_size;
        u32                     flags;
        u32                     handle;
        u32                     offset;
@@ -1056,13 +1056,15 @@ struct hl_device_reset_work {
  * @cb_pool_lock: protects the CB pool.
  * @user_ctx: current user context executing.
  * @dram_used_mem: current DRAM memory consumption.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
- * @fd_open_cnt: number of open user processes.
  * @timeout_jiffies: device CS timeout value.
  * @max_power: the max power of the device, as configured by the sysadmin. This
  *             value is saved so in case of hard-reset, KMD will restore this
  *             value and update the F/W after the re-initialization
+ * @in_reset: is device in reset flow.
+ * @curr_pll_profile: current PLL profile.
+ * @fd_open_cnt: number of open user processes.
+ * @cs_active_cnt: number of active command submissions on this device (active
+ *                 means already in H/W queues)
  * @major: habanalabs KMD major.
  * @high_pll: high PLL profile frequency.
  * @soft_reset_cnt: number of soft reset since KMD loading.
@@ -1128,11 +1130,12 @@ struct hl_device {
        struct hl_ctx                   *user_ctx;
 
        atomic64_t                      dram_used_mem;
+       u64                             timeout_jiffies;
+       u64                             max_power;
        atomic_t                        in_reset;
        atomic_t                        curr_pll_profile;
        atomic_t                        fd_open_cnt;
-       u64                             timeout_jiffies;
-       u64                             max_power;
+       atomic_t                        cs_active_cnt;
        u32                             major;
        u32                             high_pll;
        u32                             soft_reset_cnt;
index 67bece26417cbe930fa018abdb33c88ba8618b23..ef3bb695136025971c76b916a97dde8a4b36905b 100644 (file)
@@ -370,12 +370,13 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
                spin_unlock(&hdev->hw_queues_mirror_lock);
        }
 
-       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) {
+       atomic_inc(&hdev->cs_active_cnt);
+
+       list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                if (job->ext_queue)
                        ext_hw_queue_schedule_job(job);
                else
                        int_hw_queue_schedule_job(job);
-       }
 
        cs->submitted = true;
 
index 3a12fd1a5274479e89406947991fd709203e6726..ce1fda40a8b8112572b9a26db139c8aa6de76f8e 100644 (file)
@@ -56,9 +56,9 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        struct hl_device *hdev = ctx->hdev;
        struct hl_vm *vm = &hdev->vm;
        struct hl_vm_phys_pg_pack *phys_pg_pack;
-       u64 paddr = 0;
-       u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift;
-       int handle, rc, i;
+       u64 paddr = 0, total_size, num_pgs, i;
+       u32 num_curr_pgs, page_size, page_shift;
+       int handle, rc;
        bool contiguous;
 
        num_curr_pgs = 0;
@@ -73,7 +73,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
                paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size);
                if (!paddr) {
                        dev_err(hdev->dev,
-                               "failed to allocate %u huge contiguous pages\n",
+                               "failed to allocate %llu huge contiguous pages\n",
                                num_pgs);
                        return -ENOMEM;
                }
@@ -93,7 +93,7 @@ static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
        phys_pg_pack->flags = args->flags;
        phys_pg_pack->contiguous = contiguous;
 
-       phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto pages_arr_err;
@@ -148,7 +148,7 @@ page_err:
                        gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
                                        page_size);
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
 pages_arr_err:
        kfree(phys_pg_pack);
 pages_pack_err:
@@ -267,7 +267,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_vm *vm = &hdev->vm;
-       int i;
+       u64 i;
 
        if (!phys_pg_pack->created_from_userptr) {
                if (phys_pg_pack->contiguous) {
@@ -288,7 +288,7 @@ static void free_phys_pg_pack(struct hl_device *hdev,
                }
        }
 
-       kfree(phys_pg_pack->pages);
+       kvfree(phys_pg_pack->pages);
        kfree(phys_pg_pack);
 }
 
@@ -519,7 +519,7 @@ static inline int add_va_block(struct hl_device *hdev,
  * - Return the start address of the virtual block
  */
 static u64 get_va_block(struct hl_device *hdev,
-               struct hl_va_range *va_range, u32 size, u64 hint_addr,
+               struct hl_va_range *va_range, u64 size, u64 hint_addr,
                bool is_userptr)
 {
        struct hl_vm_va_block *va_block, *new_va_block = NULL;
@@ -577,7 +577,8 @@ static u64 get_va_block(struct hl_device *hdev,
        }
 
        if (!new_va_block) {
-               dev_err(hdev->dev, "no available va block for size %u\n", size);
+               dev_err(hdev->dev, "no available va block for size %llu\n",
+                               size);
                goto out;
        }
 
@@ -648,8 +649,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
        struct hl_vm_phys_pg_pack *phys_pg_pack;
        struct scatterlist *sg;
        dma_addr_t dma_addr;
-       u64 page_mask;
-       u32 npages, total_npages, page_size = PAGE_SIZE;
+       u64 page_mask, total_npages;
+       u32 npages, page_size = PAGE_SIZE;
        bool first = true, is_huge_page_opt = true;
        int rc, i, j;
 
@@ -691,7 +692,8 @@ static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
 
        page_mask = ~(((u64) page_size) - 1);
 
-       phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL);
+       phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
+                                               GFP_KERNEL);
        if (!phys_pg_pack->pages) {
                rc = -ENOMEM;
                goto page_pack_arr_mem_err;
@@ -750,9 +752,9 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                struct hl_vm_phys_pg_pack *phys_pg_pack)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 next_vaddr = vaddr, paddr;
+       u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
        u32 page_size = phys_pg_pack->page_size;
-       int i, rc = 0, mapped_pg_cnt = 0;
+       int rc = 0;
 
        for (i = 0 ; i < phys_pg_pack->npages ; i++) {
                paddr = phys_pg_pack->pages[i];
@@ -764,7 +766,7 @@ static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr,
                rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size);
                if (rc) {
                        dev_err(hdev->dev,
-                               "map failed for handle %u, npages: %d, mapped: %d",
+                               "map failed for handle %u, npages: %llu, mapped: %llu",
                                phys_pg_pack->handle, phys_pg_pack->npages,
                                mapped_pg_cnt);
                        goto err;
@@ -985,10 +987,10 @@ static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr)
        struct hl_vm_hash_node *hnode = NULL;
        struct hl_userptr *userptr = NULL;
        enum vm_type_t *vm_type;
-       u64 next_vaddr;
+       u64 next_vaddr, i;
        u32 page_size;
        bool is_userptr;
-       int i, rc;
+       int rc;
 
        /* protect from double entrance */
        mutex_lock(&ctx->mem_hash_lock);
index 2f2e99cb27439433bd4527350b2347a6856cab5d..3a5a2cec83051b08c1b838372aaf29c0f1b99e13 100644 (file)
@@ -832,7 +832,7 @@ err:
 int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 {
        struct hl_device *hdev = ctx->hdev;
-       u64 real_virt_addr;
+       u64 real_virt_addr, real_phys_addr;
        u32 real_page_size, npages;
        int i, rc, mapped_cnt = 0;
 
@@ -857,14 +857,16 @@ int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size)
 
        npages = page_size / real_page_size;
        real_virt_addr = virt_addr;
+       real_phys_addr = phys_addr;
 
        for (i = 0 ; i < npages ; i++) {
-               rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+               rc = _hl_mmu_map(ctx, real_virt_addr, real_phys_addr,
                                real_page_size);
                if (rc)
                        goto err;
 
                real_virt_addr += real_page_size;
+               real_phys_addr += real_page_size;
                mapped_cnt++;
        }
 
index 5e4ca082cfcdb29845326adf3341d1dd3b461811..7a96d168efc41dce1510fbac29522ce06851a8db 100644 (file)
@@ -216,8 +216,8 @@ config GENEVE
 
 config GTP
        tristate "GPRS Tunneling Protocol datapath (GTP-U)"
-       depends on INET && NET_UDP_TUNNEL
-       select NET_IP_TUNNEL
+       depends on INET
+       select NET_UDP_TUNNEL
        ---help---
          This allows one to create gtp virtual interfaces that provide
          the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
index 576b37d12a63ca4ea5064cd568194ec25fa22ee5..c4fa400efdcc82643dcd4d2c762ed8079305adf7 100644 (file)
@@ -481,6 +481,155 @@ qca8k_port_set_status(struct qca8k_priv *priv, int port, int enable)
                qca8k_reg_clear(priv, QCA8K_REG_PORT_STATUS(port), mask);
 }
 
+static u32
+qca8k_port_to_phy(int port)
+{
+       /* From Andrew Lunn:
+        * Port 0 has no internal phy.
+        * Port 1 has an internal PHY at MDIO address 0.
+        * Port 2 has an internal PHY at MDIO address 1.
+        * ...
+        * Port 5 has an internal PHY at MDIO address 4.
+        * Port 6 has no internal PHY.
+        */
+
+       return port - 1;
+}
+
+static int
+qca8k_mdio_write(struct qca8k_priv *priv, int port, u32 regnum, u16 data)
+{
+       u32 phy, val;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       /* callee is responsible for not passing bad ports,
+        * but we still would like to make spills impossible.
+        */
+       phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_WRITE | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum) |
+             QCA8K_MDIO_MASTER_DATA(data);
+
+       qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+       return qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+               QCA8K_MDIO_MASTER_BUSY);
+}
+
+static int
+qca8k_mdio_read(struct qca8k_priv *priv, int port, u32 regnum)
+{
+       u32 phy, val;
+
+       if (regnum >= QCA8K_MDIO_MASTER_MAX_REG)
+               return -EINVAL;
+
+       /* callee is responsible for not passing bad ports,
+        * but we still would like to make spills impossible.
+        */
+       phy = qca8k_port_to_phy(port) % PHY_MAX_ADDR;
+       val = QCA8K_MDIO_MASTER_BUSY | QCA8K_MDIO_MASTER_EN |
+             QCA8K_MDIO_MASTER_READ | QCA8K_MDIO_MASTER_PHY_ADDR(phy) |
+             QCA8K_MDIO_MASTER_REG_ADDR(regnum);
+
+       qca8k_write(priv, QCA8K_MDIO_MASTER_CTRL, val);
+
+       if (qca8k_busy_wait(priv, QCA8K_MDIO_MASTER_CTRL,
+                           QCA8K_MDIO_MASTER_BUSY))
+               return -ETIMEDOUT;
+
+       val = (qca8k_read(priv, QCA8K_MDIO_MASTER_CTRL) &
+               QCA8K_MDIO_MASTER_DATA_MASK);
+
+       return val;
+}
+
+static int
+qca8k_phy_write(struct dsa_switch *ds, int port, int regnum, u16 data)
+{
+       struct qca8k_priv *priv = ds->priv;
+
+       return qca8k_mdio_write(priv, port, regnum, data);
+}
+
+static int
+qca8k_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+       struct qca8k_priv *priv = ds->priv;
+       int ret;
+
+       ret = qca8k_mdio_read(priv, port, regnum);
+
+       if (ret < 0)
+               return 0xffff;
+
+       return ret;
+}
+
+static int
+qca8k_setup_mdio_bus(struct qca8k_priv *priv)
+{
+       u32 internal_mdio_mask = 0, external_mdio_mask = 0, reg;
+       struct device_node *ports, *port;
+       int err;
+
+       ports = of_get_child_by_name(priv->dev->of_node, "ports");
+       if (!ports)
+               return -EINVAL;
+
+       for_each_available_child_of_node(ports, port) {
+               err = of_property_read_u32(port, "reg", &reg);
+               if (err)
+                       return err;
+
+               if (!dsa_is_user_port(priv->ds, reg))
+                       continue;
+
+               if (of_property_read_bool(port, "phy-handle"))
+                       external_mdio_mask |= BIT(reg);
+               else
+                       internal_mdio_mask |= BIT(reg);
+       }
+
+       if (!external_mdio_mask && !internal_mdio_mask) {
+               dev_err(priv->dev, "no PHYs are defined.\n");
+               return -EINVAL;
+       }
+
+       /* The QCA8K_MDIO_MASTER_EN bit, which grants access to PHYs through
+        * the MDIO_MASTER register, also _disconnects_ the external MDC
+        * passthrough to the internal PHYs. It's not possible to use both
+        * configurations at the same time!
+        *
+        * Because this came up during the review process:
+        * If the external mdio-bus driver is capable of magically disabling
+        * the QCA8K_MDIO_MASTER_EN and mutex/spin-locking out the qca8k's
+        * accessors for the time being, it would be possible to pull this
+        * off.
+        */
+       if (!!external_mdio_mask && !!internal_mdio_mask) {
+               dev_err(priv->dev, "either internal or external mdio bus configuration is supported.\n");
+               return -EINVAL;
+       }
+
+       if (external_mdio_mask) {
+               /* Make sure to disable the internal mdio bus in case
+                * a dt-overlay and driver reload have changed the configuration
+                */
+
+               qca8k_reg_clear(priv, QCA8K_MDIO_MASTER_CTRL,
+                               QCA8K_MDIO_MASTER_EN);
+               return 0;
+       }
+
+       priv->ops.phy_read = qca8k_phy_read;
+       priv->ops.phy_write = qca8k_phy_write;
+       return 0;
+}
+
 static int
 qca8k_setup(struct dsa_switch *ds)
 {
@@ -502,6 +651,10 @@ qca8k_setup(struct dsa_switch *ds)
        if (IS_ERR(priv->regmap))
                pr_warn("regmap initialization failed");
 
+       ret = qca8k_setup_mdio_bus(priv);
+       if (ret)
+               return ret;
+
        /* Initialize CPU port pad mode (xMII type, delays...) */
        phy_mode = of_get_phy_mode(ds->ports[QCA8K_CPU_PORT].dn);
        if (phy_mode < 0) {
@@ -624,22 +777,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
        qca8k_port_set_status(priv, port, 1);
 }
 
-static int
-qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       return mdiobus_read(priv->bus, phy, regnum);
-}
-
-static int
-qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
-{
-       struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
-
-       return mdiobus_write(priv->bus, phy, regnum, val);
-}
-
 static void
 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
 {
@@ -879,8 +1016,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
        .setup                  = qca8k_setup,
        .adjust_link            = qca8k_adjust_link,
        .get_strings            = qca8k_get_strings,
-       .phy_read               = qca8k_phy_read,
-       .phy_write              = qca8k_phy_write,
        .get_ethtool_stats      = qca8k_get_ethtool_stats,
        .get_sset_count         = qca8k_get_sset_count,
        .get_mac_eee            = qca8k_get_mac_eee,
@@ -923,7 +1058,8 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
                return -ENOMEM;
 
        priv->ds->priv = priv;
-       priv->ds->ops = &qca8k_switch_ops;
+       priv->ops = qca8k_switch_ops;
+       priv->ds->ops = &priv->ops;
        mutex_init(&priv->reg_mutex);
        dev_set_drvdata(&mdiodev->dev, priv);
 
index d146e54c8a6c615045ff18b31b413fba08365221..249fd62268e5450ff41fa33ef7d7ba055d35461c 100644 (file)
 #define   QCA8K_MIB_FLUSH                              BIT(24)
 #define   QCA8K_MIB_CPU_KEEP                           BIT(20)
 #define   QCA8K_MIB_BUSY                               BIT(17)
+#define QCA8K_MDIO_MASTER_CTRL                         0x3c
+#define   QCA8K_MDIO_MASTER_BUSY                       BIT(31)
+#define   QCA8K_MDIO_MASTER_EN                         BIT(30)
+#define   QCA8K_MDIO_MASTER_READ                       BIT(27)
+#define   QCA8K_MDIO_MASTER_WRITE                      0
+#define   QCA8K_MDIO_MASTER_SUP_PRE                    BIT(26)
+#define   QCA8K_MDIO_MASTER_PHY_ADDR(x)                        ((x) << 21)
+#define   QCA8K_MDIO_MASTER_REG_ADDR(x)                        ((x) << 16)
+#define   QCA8K_MDIO_MASTER_DATA(x)                    (x)
+#define   QCA8K_MDIO_MASTER_DATA_MASK                  GENMASK(15, 0)
+#define   QCA8K_MDIO_MASTER_MAX_PORTS                  5
+#define   QCA8K_MDIO_MASTER_MAX_REG                    32
 #define QCA8K_GOL_MAC_ADDR0                            0x60
 #define QCA8K_GOL_MAC_ADDR1                            0x64
 #define QCA8K_REG_PORT_STATUS(_i)                      (0x07c + (_i) * 4)
@@ -169,6 +181,7 @@ struct qca8k_priv {
        struct dsa_switch *ds;
        struct mutex reg_mutex;
        struct device *dev;
+       struct dsa_switch_ops ops;
 };
 
 struct qca8k_mib_desc {
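
The new QCA8K_MDIO_MASTER_* definitions above pack the PHY address into bits 25:21, the register number into bits 20:16 and the data into bits 15:0 of the MDIO master control register. A small standalone sketch composing a read command the same way qca8k_mdio_read() does (the shortened MASTER_* names and the chosen phy/reg values are illustrative only):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)			(1u << (n))
#define MASTER_BUSY		BIT(31)
#define MASTER_EN		BIT(30)
#define MASTER_READ		BIT(27)
#define MASTER_PHY_ADDR(x)	((uint32_t)(x) << 21)
#define MASTER_REG_ADDR(x)	((uint32_t)(x) << 16)
#define MASTER_DATA_MASK	0xffffu		/* GENMASK(15, 0) */

int main(void)
{
	/* Read register 2 of the internal PHY at MDIO address 3. */
	uint32_t val = MASTER_BUSY | MASTER_EN | MASTER_READ |
		       MASTER_PHY_ADDR(3) | MASTER_REG_ADDR(2);

	printf("control word: %#010x\n", val);	/* 0xc8620000 */
	printf("data mask:    %#06x\n", MASTER_DATA_MASK);
	return 0;
}
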
index 808abb6b367134e76a79dd2a9bf857d02559af65..b15752267c8dfde6d40b6296154e2cd350b185e4 100644 (file)
@@ -1521,7 +1521,7 @@ static void update_stats(int ioaddr, struct net_device *dev)
 static void set_rx_mode(struct net_device *dev)
 {
        int ioaddr = dev->base_addr;
-       short new_mode;
+       unsigned short new_mode;
 
        if (dev->flags & IFF_PROMISC) {
                if (corkscrew_debug > 3)
index 342ae08ec3c29832ae5be0da8d93e59d6441cab1..d60a86aa8aa8049e7c5216f15f64b6d8406ec115 100644 (file)
@@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
 static void dayna_block_output(struct net_device *dev, int count,
                               const unsigned char *buf, int start_page);
 
-#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
-
 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
 static void slow_sane_get_8390_hdr(struct net_device *dev,
                                   struct e8390_pkt_hdr *hdr, int ring_page);
@@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
 
 static enum mac8390_access mac8390_testio(unsigned long membase)
 {
-       unsigned long outdata = 0xA5A0B5B0;
-       unsigned long indata =  0x00000000;
+       u32 outdata = 0xA5A0B5B0;
+       u32 indata = 0;
+
        /* Try writing 32 bits */
-       memcpy_toio((void __iomem *)membase, &outdata, 4);
-       /* Now compare them */
-       if (memcmp_withio(&outdata, membase, 4) == 0)
+       nubus_writel(outdata, membase);
+       /* Now read it back */
+       indata = nubus_readl(membase);
+       if (outdata == indata)
                return ACCESS_32;
+
+       outdata = 0xC5C0D5D0;
+       indata = 0;
+
        /* Write 16 bit output */
        word_memcpy_tocard(membase, &outdata, 4);
        /* Now read it back */
        word_memcpy_fromcard(&indata, membase, 4);
        if (outdata == indata)
                return ACCESS_16;
+
        return ACCESS_UNKNOWN;
 }
 
index 74550ccc7a20ff8437463384e906b718027dc6ef..e2ffb159cbe2eeb5980a89aa688ebde8826fc7e6 100644 (file)
@@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
        }
        if (buff->is_ip_cso) {
                __skb_incr_checksum_unnecessary(skb);
-               if (buff->is_udp_cso || buff->is_tcp_cso)
-                       __skb_incr_checksum_unnecessary(skb);
        } else {
                skb->ip_summed = CHECKSUM_NONE;
        }
+
+       if (buff->is_udp_cso || buff->is_tcp_cso)
+               __skb_incr_checksum_unnecessary(skb);
 }
 
 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
index ad099fd01b45ae947492e828337c76df6d701587..1522aee81884bdf702b32e1cd8cbae4316e988b5 100644 (file)
@@ -3370,14 +3370,20 @@ static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
                *hclk = devm_clk_get(&pdev->dev, "hclk");
        }
 
-       if (IS_ERR(*pclk)) {
+       if (IS_ERR_OR_NULL(*pclk)) {
                err = PTR_ERR(*pclk);
+               if (!err)
+                       err = -ENODEV;
+
                dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
                return err;
        }
 
-       if (IS_ERR(*hclk)) {
+       if (IS_ERR_OR_NULL(*hclk)) {
                err = PTR_ERR(*hclk);
+               if (!err)
+                       err = -ENODEV;
+
                dev_err(&pdev->dev, "failed to get hclk (%u)\n", err);
                return err;
        }
index 3130b43bba52c9570e76223bf2779c3f3c076c34..02959035ed3f21287a3673f93c55f0e76b549de1 100644 (file)
@@ -2620,7 +2620,7 @@ static inline struct port_info *ethqset2pinfo(struct adapter *adap, int qset)
        }
 
        /* should never happen! */
-       BUG_ON(1);
+       BUG();
        return NULL;
 }
 
index 88773ca58e6b1fc45dce1eeea8064174b67407d9..b3da81e90132fd74d26b007ca5414a066547774f 100644 (file)
@@ -476,7 +476,7 @@ static inline int get_buf_size(struct adapter *adapter,
                break;
 
        default:
-               BUG_ON(1);
+               BUG();
        }
 
        return buf_size;
index 2ba49e959c3fd391115740988ae44b3c0698b4d5..dc339dc1adb21c30224fbce6eb0d60fd861c9388 100644 (file)
@@ -815,6 +815,14 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
         */
        queue_mapping = skb_get_queue_mapping(skb);
        fq = &priv->fq[queue_mapping];
+
+       fd_len = dpaa2_fd_get_len(&fd);
+       nq = netdev_get_tx_queue(net_dev, queue_mapping);
+       netdev_tx_sent_queue(nq, fd_len);
+
+       /* Everything that happens after this enqueue might race with
+        * the Tx confirmation callback for this frame
+        */
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
                err = priv->enqueue(priv, fq, &fd, 0);
                if (err != -EBUSY)
@@ -825,13 +833,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
                percpu_stats->tx_errors++;
                /* Clean up everything, including freeing the skb */
                free_tx_fd(priv, fq, &fd, false);
+               netdev_tx_completed_queue(nq, 1, fd_len);
        } else {
-               fd_len = dpaa2_fd_get_len(&fd);
                percpu_stats->tx_packets++;
                percpu_stats->tx_bytes += fd_len;
-
-               nq = netdev_get_tx_queue(net_dev, queue_mapping);
-               netdev_tx_sent_queue(nq, fd_len);
        }
 
        return NETDEV_TX_OK;
@@ -1817,7 +1822,7 @@ static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
        dpaa2_fd_set_format(&fd, dpaa2_fd_single);
        dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
 
-       fq = &priv->fq[smp_processor_id()];
+       fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
        for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
                err = priv->enqueue(priv, fq, &fd, 0);
                if (err != -EBUSY)
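
The first dpaa2_eth_tx() hunk above moves the netdev_tx_sent_queue() byte accounting ahead of the enqueue, because the Tx confirmation path (which reports the bytes back via netdev_tx_completed_queue()) can run as soon as the frame is enqueued; the error path then rolls the accounting back. A minimal standalone model of that ordering rule (plain C with illustrative names, not the BQL API itself):

#include <stdio.h>

static unsigned int inflight_bytes;	/* stands in for the BQL counter */

static void tx_sent(unsigned int bytes)      { inflight_bytes += bytes; }
static void tx_completed(unsigned int bytes) { inflight_bytes -= bytes; }

/*
 * Account the bytes before handing the frame to the hardware: once it is
 * enqueued, the completion side may call tx_completed() at any moment, so
 * deferring tx_sent() until after a successful enqueue could let the
 * counter be decremented before it was ever incremented.
 */
static int xmit(unsigned int bytes, int enqueue_ok)
{
	tx_sent(bytes);			/* netdev_tx_sent_queue() */
	if (!enqueue_ok) {
		tx_completed(bytes);	/* roll back, as in the error path */
		return -1;
	}
	return 0;
}

int main(void)
{
	xmit(1500, 1);
	tx_completed(1500);		/* Tx confirmation callback */
	printf("in flight: %u bytes\n", inflight_bytes);	/* 0 */
	return 0;
}
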
index 1c1f17ec6be2dfa11cd1208b0b04273f07855c10..162cb9afa0e705d1e7d668c1cf34eae64f4d65f2 100644 (file)
@@ -22,6 +22,7 @@
 #include "hns3_enet.h"
 
 #define hns3_set_field(origin, shift, val)     ((origin) |= ((val) << (shift)))
+#define hns3_tx_bd_count(S)    DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
 
 static void hns3_clear_all_ring(struct hnae3_handle *h);
 static void hns3_force_clear_all_rx_ring(struct hnae3_handle *h);
@@ -1079,7 +1080,7 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 
        desc_cb->length = size;
 
-       frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+       frag_buf_num = hns3_tx_bd_count(size);
        sizeoflast = size & HNS3_TX_LAST_SIZE_M;
        sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 
@@ -1124,14 +1125,13 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
        int i;
 
        size = skb_headlen(skb);
-       buf_num = (size + HNS3_MAX_BD_SIZE - 1) >> HNS3_MAX_BD_SIZE_OFFSET;
+       buf_num = hns3_tx_bd_count(size);
 
        frag_num = skb_shinfo(skb)->nr_frags;
        for (i = 0; i < frag_num; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                size = skb_frag_size(frag);
-               bdnum_for_frag = (size + HNS3_MAX_BD_SIZE - 1) >>
-                                HNS3_MAX_BD_SIZE_OFFSET;
+               bdnum_for_frag = hns3_tx_bd_count(size);
                if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
                        return -ENOMEM;
 
@@ -1139,8 +1139,7 @@ static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
        }
 
        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-               buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) >>
-                         HNS3_MAX_BD_SIZE_OFFSET;
+               buf_num = hns3_tx_bd_count(skb->len);
                if (ring_space(ring) < buf_num)
                        return -EBUSY;
                /* manual split the send packet */
@@ -1169,7 +1168,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
        buf_num = skb_shinfo(skb)->nr_frags + 1;
 
        if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-               buf_num = (skb->len + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+               buf_num = hns3_tx_bd_count(skb->len);
                if (ring_space(ring) < buf_num)
                        return -EBUSY;
                /* manual split the send packet */
index 1db0bd41d20961f931f464850b8e5f1395276f76..75669cd0c31145fd763959f226175452dbb399bf 100644 (file)
@@ -193,7 +193,6 @@ enum hns3_nic_state {
 #define HNS3_VECTOR_INITED                     1
 
 #define HNS3_MAX_BD_SIZE                       65535
-#define HNS3_MAX_BD_SIZE_OFFSET                16
 #define HNS3_MAX_BD_PER_FRAG                   8
 #define HNS3_MAX_BD_PER_PKT                    MAX_SKB_FRAGS
 
index 3baabdc897262698ab23b4bc1dedec22edc89919..90b62c1412c8f4715eaf1ab3ca14a9128f1f9046 100644 (file)
@@ -3160,6 +3160,7 @@ static ssize_t ehea_probe_port(struct device *dev,
 
        if (ehea_add_adapter_mr(adapter)) {
                pr_err("creating MR failed\n");
+               of_node_put(eth_dn);
                return -EIO;
        }
 
index 7a15e932ed2f5c8ddaee26ab078c943786cac421..c1c1965d7accabca443888932c30090564433d7c 100644 (file)
@@ -113,7 +113,7 @@ int mlxsw_env_module_temp_thresholds_get(struct mlxsw_core *core, int module,
                return 0;
        default:
                /* Do not consider thresholds for zero temperature. */
-               if (!MLXSW_REG_MTMP_TEMP_TO_MC(module_temp)) {
+               if (MLXSW_REG_MTMP_TEMP_TO_MC(module_temp) == 0) {
                        *temp = 0;
                        return 0;
                }
index bd6e9014bc74794b9a8a7e680f5b59ea7048382f..7849119d407aef1a7b92d6b0e047a8f74b4867f7 100644 (file)
@@ -142,6 +142,12 @@ struct ks8851_net {
 
 static int msg_enable;
 
+/* SPI frame opcodes */
+#define KS_SPIOP_RD    (0x00)
+#define KS_SPIOP_WR    (0x40)
+#define KS_SPIOP_RXFIFO        (0x80)
+#define KS_SPIOP_TXFIFO        (0xC0)
+
 /* shift for byte-enable data */
 #define BYTE_EN(_x)    ((_x) << 2)
 
@@ -535,9 +541,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                /* set dma read address */
                ks8851_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI | 0x00);
 
-               /* start the packet dma process, and set auto-dequeue rx */
-               ks8851_wrreg16(ks, KS_RXQCR,
-                              ks->rc_rxqcr | RXQCR_SDA | RXQCR_ADRFE);
+               /* start DMA access */
+               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
 
                if (rxlen > 4) {
                        unsigned int rxalign;
@@ -568,7 +573,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
                        }
                }
 
-               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
+               /* end DMA access and dequeue packet */
+               ks8851_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_RRXEF);
        }
 }
 
@@ -785,6 +791,15 @@ static void ks8851_tx_work(struct work_struct *work)
 static int ks8851_net_open(struct net_device *dev)
 {
        struct ks8851_net *ks = netdev_priv(dev);
+       int ret;
+
+       ret = request_threaded_irq(dev->irq, NULL, ks8851_irq,
+                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+                                  dev->name, ks);
+       if (ret < 0) {
+               netdev_err(dev, "failed to get irq\n");
+               return ret;
+       }
 
        /* lock the card, even if we may not actually be doing anything
         * else at the moment */
@@ -849,6 +864,7 @@ static int ks8851_net_open(struct net_device *dev)
        netif_dbg(ks, ifup, ks->netdev, "network device up\n");
 
        mutex_unlock(&ks->lock);
+       mii_check_link(&ks->mii);
        return 0;
 }
 
@@ -899,6 +915,8 @@ static int ks8851_net_stop(struct net_device *dev)
                dev_kfree_skb(txb);
        }
 
+       free_irq(dev->irq, ks);
+
        return 0;
 }
 
@@ -1508,6 +1526,7 @@ static int ks8851_probe(struct spi_device *spi)
 
        spi_set_drvdata(spi, ks);
 
+       netif_carrier_off(ks->netdev);
        ndev->if_port = IF_PORT_100BASET;
        ndev->netdev_ops = &ks8851_netdev_ops;
        ndev->irq = spi->irq;
@@ -1529,14 +1548,6 @@ static int ks8851_probe(struct spi_device *spi)
        ks8851_read_selftest(ks);
        ks8851_init_mac(ks);
 
-       ret = request_threaded_irq(spi->irq, NULL, ks8851_irq,
-                                  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
-                                  ndev->name, ks);
-       if (ret < 0) {
-               dev_err(&spi->dev, "failed to get irq\n");
-               goto err_irq;
-       }
-
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(&spi->dev, "failed to register network device\n");
@@ -1549,14 +1560,10 @@ static int ks8851_probe(struct spi_device *spi)
 
        return 0;
 
-
 err_netdev:
-       free_irq(ndev->irq, ks);
-
-err_irq:
+err_id:
        if (gpio_is_valid(gpio))
                gpio_set_value(gpio, 0);
-err_id:
        regulator_disable(ks->vdd_reg);
 err_reg:
        regulator_disable(ks->vdd_io);
@@ -1574,7 +1581,6 @@ static int ks8851_remove(struct spi_device *spi)
                dev_info(&spi->dev, "remove\n");
 
        unregister_netdev(priv->netdev);
-       free_irq(spi->irq, priv);
        if (gpio_is_valid(priv->gpio))
                gpio_set_value(priv->gpio, 0);
        regulator_disable(priv->vdd_reg);
index 852256ef1f2233b9d60efa0b01da45e128514a14..23da1e3ee429af922c603b5a53e70820e398f38a 100644 (file)
 */
 
 #define KS_CCR                                 0x08
+#define CCR_LE                                 (1 << 10)   /* KSZ8851-16MLL */
 #define CCR_EEPROM                             (1 << 9)
-#define CCR_SPI                                        (1 << 8)
-#define CCR_32PIN                              (1 << 0)
+#define CCR_SPI                                        (1 << 8)    /* KSZ8851SNL    */
+#define CCR_8BIT                               (1 << 7)    /* KSZ8851-16MLL */
+#define CCR_16BIT                              (1 << 6)    /* KSZ8851-16MLL */
+#define CCR_32BIT                              (1 << 5)    /* KSZ8851-16MLL */
+#define CCR_SHARED                             (1 << 4)    /* KSZ8851-16MLL */
+#define CCR_48PIN                              (1 << 1)    /* KSZ8851-16MLL */
+#define CCR_32PIN                              (1 << 0)    /* KSZ8851SNL    */
 
 /* MAC address registers */
 #define KS_MAR(_m)                             (0x15 - (_m))
 #define RXCR1_RXE                              (1 << 0)
 
 #define KS_RXCR2                               0x76
-#define RXCR2_SRDBL_MASK                       (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT                      (5)
-#define RXCR2_SRDBL_4B                         (0x0 << 5)
-#define RXCR2_SRDBL_8B                         (0x1 << 5)
-#define RXCR2_SRDBL_16B                                (0x2 << 5)
-#define RXCR2_SRDBL_32B                                (0x3 << 5)
-#define RXCR2_SRDBL_FRAME                      (0x4 << 5)
+#define RXCR2_SRDBL_MASK                       (0x7 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_SHIFT                      (5)         /* KSZ8851SNL    */
+#define RXCR2_SRDBL_4B                         (0x0 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_8B                         (0x1 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_16B                                (0x2 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_32B                                (0x3 << 5)  /* KSZ8851SNL    */
+#define RXCR2_SRDBL_FRAME                      (0x4 << 5)  /* KSZ8851SNL    */
 #define RXCR2_IUFFP                            (1 << 4)
 #define RXCR2_RXIUFCEZ                         (1 << 3)
 #define RXCR2_UDPLFE                           (1 << 2)
 #define RXFSHR_RXCE                            (1 << 0)
 
 #define KS_RXFHBCR                             0x7E
+#define RXFHBCR_CNT_MASK                       (0xfff << 0)
+
 #define KS_TXQCR                               0x80
-#define TXQCR_AETFE                            (1 << 2)
+#define TXQCR_AETFE                            (1 << 2)    /* KSZ8851SNL    */
 #define TXQCR_TXQMAM                           (1 << 1)
 #define TXQCR_METFE                            (1 << 0)
 
 
 #define KS_RXFDPR                              0x86
 #define RXFDPR_RXFPAI                          (1 << 14)
+#define RXFDPR_WST                             (1 << 12)   /* KSZ8851-16MLL */
+#define RXFDPR_EMS                             (1 << 11)   /* KSZ8851-16MLL */
+#define RXFDPR_RXFP_MASK                       (0x7ff << 0)
+#define RXFDPR_RXFP_SHIFT                      (0)
 
 #define KS_RXDTTR                              0x8C
 #define KS_RXDBCTR                             0x8E
 #define IRQ_RXMPDI                             (1 << 4)
 #define IRQ_LDI                                        (1 << 3)
 #define IRQ_EDI                                        (1 << 2)
-#define IRQ_SPIBEI                             (1 << 1)
+#define IRQ_SPIBEI                             (1 << 1)    /* KSZ8851SNL    */
 #define IRQ_DEDI                               (1 << 0)
 
 #define KS_RXFCTR                              0x9C
 #define KS_P1ANLPR                             0xEE
 
 #define KS_P1SCLMD                             0xF4
-#define P1SCLMD_LEDOFF                         (1 << 15)
-#define P1SCLMD_TXIDS                          (1 << 14)
-#define P1SCLMD_RESTARTAN                      (1 << 13)
-#define P1SCLMD_DISAUTOMDIX                    (1 << 10)
-#define P1SCLMD_FORCEMDIX                      (1 << 9)
-#define P1SCLMD_AUTONEGEN                      (1 << 7)
-#define P1SCLMD_FORCE100                       (1 << 6)
-#define P1SCLMD_FORCEFDX                       (1 << 5)
-#define P1SCLMD_ADV_FLOW                       (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX                  (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX                  (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX                   (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX                   (1 << 0)
 
 #define KS_P1CR                                        0xF6
-#define P1CR_HP_MDIX                           (1 << 15)
-#define P1CR_REV_POL                           (1 << 13)
-#define P1CR_OP_100M                           (1 << 10)
-#define P1CR_OP_FDX                            (1 << 9)
-#define P1CR_OP_MDI                            (1 << 7)
-#define P1CR_AN_DONE                           (1 << 6)
-#define P1CR_LINK_GOOD                         (1 << 5)
-#define P1CR_PNTR_FLOW                         (1 << 4)
-#define P1CR_PNTR_100BT_FDX                    (1 << 3)
-#define P1CR_PNTR_100BT_HDX                    (1 << 2)
-#define P1CR_PNTR_10BT_FDX                     (1 << 1)
-#define P1CR_PNTR_10BT_HDX                     (1 << 0)
+#define P1CR_LEDOFF                            (1 << 15)
+#define P1CR_TXIDS                             (1 << 14)
+#define P1CR_RESTARTAN                         (1 << 13)
+#define P1CR_DISAUTOMDIX                       (1 << 10)
+#define P1CR_FORCEMDIX                         (1 << 9)
+#define P1CR_AUTONEGEN                         (1 << 7)
+#define P1CR_FORCE100                          (1 << 6)
+#define P1CR_FORCEFDX                          (1 << 5)
+#define P1CR_ADV_FLOW                          (1 << 4)
+#define P1CR_ADV_100BT_FDX                     (1 << 3)
+#define P1CR_ADV_100BT_HDX                     (1 << 2)
+#define P1CR_ADV_10BT_FDX                      (1 << 1)
+#define P1CR_ADV_10BT_HDX                      (1 << 0)
+
+#define KS_P1SR                                        0xF8
+#define P1SR_HP_MDIX                           (1 << 15)
+#define P1SR_REV_POL                           (1 << 13)
+#define P1SR_OP_100M                           (1 << 10)
+#define P1SR_OP_FDX                            (1 << 9)
+#define P1SR_OP_MDI                            (1 << 7)
+#define P1SR_AN_DONE                           (1 << 6)
+#define P1SR_LINK_GOOD                         (1 << 5)
+#define P1SR_PNTR_FLOW                         (1 << 4)
+#define P1SR_PNTR_100BT_FDX                    (1 << 3)
+#define P1SR_PNTR_100BT_HDX                    (1 << 2)
+#define P1SR_PNTR_10BT_FDX                     (1 << 1)
+#define P1SR_PNTR_10BT_HDX                     (1 << 0)
 
 /* TX Frame control */
-
 #define TXFR_TXIC                              (1 << 15)
 #define TXFR_TXFID_MASK                                (0x3f << 0)
 #define TXFR_TXFID_SHIFT                       (0)
-
-/* SPI frame opcodes */
-#define KS_SPIOP_RD                            (0x00)
-#define KS_SPIOP_WR                            (0x40)
-#define KS_SPIOP_RXFIFO                                (0x80)
-#define KS_SPIOP_TXFIFO                                (0xC0)
index 35f8c9ef204d91cd4c17591d84ebab597cff33b4..c946841c0a066d2e7eabd059092ed6cbbb156b01 100644 (file)
@@ -40,6 +40,8 @@
 #include <linux/of_device.h>
 #include <linux/of_net.h>
 
+#include "ks8851.h"
+
 #define        DRV_NAME        "ks8851_mll"
 
 static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
@@ -48,319 +50,10 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
 #define TX_BUF_SIZE                    2000
 #define RX_BUF_SIZE                    2000
 
-#define KS_CCR                         0x08
-#define CCR_EEPROM                     (1 << 9)
-#define CCR_SPI                                (1 << 8)
-#define CCR_8BIT                       (1 << 7)
-#define CCR_16BIT                      (1 << 6)
-#define CCR_32BIT                      (1 << 5)
-#define CCR_SHARED                     (1 << 4)
-#define CCR_32PIN                      (1 << 0)
-
-/* MAC address registers */
-#define KS_MARL                                0x10
-#define KS_MARM                                0x12
-#define KS_MARH                                0x14
-
-#define KS_OBCR                                0x20
-#define OBCR_ODS_16MA                  (1 << 6)
-
-#define KS_EEPCR                       0x22
-#define EEPCR_EESA                     (1 << 4)
-#define EEPCR_EESB                     (1 << 3)
-#define EEPCR_EEDO                     (1 << 2)
-#define EEPCR_EESCK                    (1 << 1)
-#define EEPCR_EECS                     (1 << 0)
-
-#define KS_MBIR                                0x24
-#define MBIR_TXMBF                     (1 << 12)
-#define MBIR_TXMBFA                    (1 << 11)
-#define MBIR_RXMBF                     (1 << 4)
-#define MBIR_RXMBFA                    (1 << 3)
-
-#define KS_GRR                         0x26
-#define GRR_QMU                                (1 << 1)
-#define GRR_GSR                                (1 << 0)
-
-#define KS_WFCR                                0x2A
-#define WFCR_MPRXE                     (1 << 7)
-#define WFCR_WF3E                      (1 << 3)
-#define WFCR_WF2E                      (1 << 2)
-#define WFCR_WF1E                      (1 << 1)
-#define WFCR_WF0E                      (1 << 0)
-
-#define KS_WF0CRC0                     0x30
-#define KS_WF0CRC1                     0x32
-#define KS_WF0BM0                      0x34
-#define KS_WF0BM1                      0x36
-#define KS_WF0BM2                      0x38
-#define KS_WF0BM3                      0x3A
-
-#define KS_WF1CRC0                     0x40
-#define KS_WF1CRC1                     0x42
-#define KS_WF1BM0                      0x44
-#define KS_WF1BM1                      0x46
-#define KS_WF1BM2                      0x48
-#define KS_WF1BM3                      0x4A
-
-#define KS_WF2CRC0                     0x50
-#define KS_WF2CRC1                     0x52
-#define KS_WF2BM0                      0x54
-#define KS_WF2BM1                      0x56
-#define KS_WF2BM2                      0x58
-#define KS_WF2BM3                      0x5A
-
-#define KS_WF3CRC0                     0x60
-#define KS_WF3CRC1                     0x62
-#define KS_WF3BM0                      0x64
-#define KS_WF3BM1                      0x66
-#define KS_WF3BM2                      0x68
-#define KS_WF3BM3                      0x6A
-
-#define KS_TXCR                                0x70
-#define TXCR_TCGICMP                   (1 << 8)
-#define TXCR_TCGUDP                    (1 << 7)
-#define TXCR_TCGTCP                    (1 << 6)
-#define TXCR_TCGIP                     (1 << 5)
-#define TXCR_FTXQ                      (1 << 4)
-#define TXCR_TXFCE                     (1 << 3)
-#define TXCR_TXPE                      (1 << 2)
-#define TXCR_TXCRC                     (1 << 1)
-#define TXCR_TXE                       (1 << 0)
-
-#define KS_TXSR                                0x72
-#define TXSR_TXLC                      (1 << 13)
-#define TXSR_TXMC                      (1 << 12)
-#define TXSR_TXFID_MASK                        (0x3f << 0)
-#define TXSR_TXFID_SHIFT               (0)
-#define TXSR_TXFID_GET(_v)             (((_v) >> 0) & 0x3f)
-
-
-#define KS_RXCR1                       0x74
-#define RXCR1_FRXQ                     (1 << 15)
-#define RXCR1_RXUDPFCC                 (1 << 14)
-#define RXCR1_RXTCPFCC                 (1 << 13)
-#define RXCR1_RXIPFCC                  (1 << 12)
-#define RXCR1_RXPAFMA                  (1 << 11)
-#define RXCR1_RXFCE                    (1 << 10)
-#define RXCR1_RXEFE                    (1 << 9)
-#define RXCR1_RXMAFMA                  (1 << 8)
-#define RXCR1_RXBE                     (1 << 7)
-#define RXCR1_RXME                     (1 << 6)
-#define RXCR1_RXUE                     (1 << 5)
-#define RXCR1_RXAE                     (1 << 4)
-#define RXCR1_RXINVF                   (1 << 1)
-#define RXCR1_RXE                      (1 << 0)
 #define RXCR1_FILTER_MASK              (RXCR1_RXINVF | RXCR1_RXAE | \
                                         RXCR1_RXMAFMA | RXCR1_RXPAFMA)
-
-#define KS_RXCR2                       0x76
-#define RXCR2_SRDBL_MASK               (0x7 << 5)
-#define RXCR2_SRDBL_SHIFT              (5)
-#define RXCR2_SRDBL_4B                 (0x0 << 5)
-#define RXCR2_SRDBL_8B                 (0x1 << 5)
-#define RXCR2_SRDBL_16B                        (0x2 << 5)
-#define RXCR2_SRDBL_32B                        (0x3 << 5)
-/* #define RXCR2_SRDBL_FRAME           (0x4 << 5) */
-#define RXCR2_IUFFP                    (1 << 4)
-#define RXCR2_RXIUFCEZ                 (1 << 3)
-#define RXCR2_UDPLFE                   (1 << 2)
-#define RXCR2_RXICMPFCC                        (1 << 1)
-#define RXCR2_RXSAF                    (1 << 0)
-
-#define KS_TXMIR                       0x78
-
-#define KS_RXFHSR                      0x7C
-#define RXFSHR_RXFV                    (1 << 15)
-#define RXFSHR_RXICMPFCS               (1 << 13)
-#define RXFSHR_RXIPFCS                 (1 << 12)
-#define RXFSHR_RXTCPFCS                        (1 << 11)
-#define RXFSHR_RXUDPFCS                        (1 << 10)
-#define RXFSHR_RXBF                    (1 << 7)
-#define RXFSHR_RXMF                    (1 << 6)
-#define RXFSHR_RXUF                    (1 << 5)
-#define RXFSHR_RXMR                    (1 << 4)
-#define RXFSHR_RXFT                    (1 << 3)
-#define RXFSHR_RXFTL                   (1 << 2)
-#define RXFSHR_RXRF                    (1 << 1)
-#define RXFSHR_RXCE                    (1 << 0)
-#define        RXFSHR_ERR                      (RXFSHR_RXCE | RXFSHR_RXRF |\
-                                       RXFSHR_RXFTL | RXFSHR_RXMR |\
-                                       RXFSHR_RXICMPFCS | RXFSHR_RXIPFCS |\
-                                       RXFSHR_RXTCPFCS)
-#define KS_RXFHBCR                     0x7E
-#define RXFHBCR_CNT_MASK               0x0FFF
-
-#define KS_TXQCR                       0x80
-#define TXQCR_AETFE                    (1 << 2)
-#define TXQCR_TXQMAM                   (1 << 1)
-#define TXQCR_METFE                    (1 << 0)
-
-#define KS_RXQCR                       0x82
-#define RXQCR_RXDTTS                   (1 << 12)
-#define RXQCR_RXDBCTS                  (1 << 11)
-#define RXQCR_RXFCTS                   (1 << 10)
-#define RXQCR_RXIPHTOE                 (1 << 9)
-#define RXQCR_RXDTTE                   (1 << 7)
-#define RXQCR_RXDBCTE                  (1 << 6)
-#define RXQCR_RXFCTE                   (1 << 5)
-#define RXQCR_ADRFE                    (1 << 4)
-#define RXQCR_SDA                      (1 << 3)
-#define RXQCR_RRXEF                    (1 << 0)
 #define RXQCR_CMD_CNTL                 (RXQCR_RXFCTE|RXQCR_ADRFE)
 
-#define KS_TXFDPR                      0x84
-#define TXFDPR_TXFPAI                  (1 << 14)
-#define TXFDPR_TXFP_MASK               (0x7ff << 0)
-#define TXFDPR_TXFP_SHIFT              (0)
-
-#define KS_RXFDPR                      0x86
-#define RXFDPR_RXFPAI                  (1 << 14)
-
-#define KS_RXDTTR                      0x8C
-#define KS_RXDBCTR                     0x8E
-
-#define KS_IER                         0x90
-#define KS_ISR                         0x92
-#define IRQ_LCI                                (1 << 15)
-#define IRQ_TXI                                (1 << 14)
-#define IRQ_RXI                                (1 << 13)
-#define IRQ_RXOI                       (1 << 11)
-#define IRQ_TXPSI                      (1 << 9)
-#define IRQ_RXPSI                      (1 << 8)
-#define IRQ_TXSAI                      (1 << 6)
-#define IRQ_RXWFDI                     (1 << 5)
-#define IRQ_RXMPDI                     (1 << 4)
-#define IRQ_LDI                                (1 << 3)
-#define IRQ_EDI                                (1 << 2)
-#define IRQ_SPIBEI                     (1 << 1)
-#define IRQ_DEDI                       (1 << 0)
-
-#define KS_RXFCTR                      0x9C
-#define RXFCTR_THRESHOLD_MASK          0x00FF
-
-#define KS_RXFC                                0x9D
-#define RXFCTR_RXFC_MASK               (0xff << 8)
-#define RXFCTR_RXFC_SHIFT              (8)
-#define RXFCTR_RXFC_GET(_v)            (((_v) >> 8) & 0xff)
-#define RXFCTR_RXFCT_MASK              (0xff << 0)
-#define RXFCTR_RXFCT_SHIFT             (0)
-
-#define KS_TXNTFSR                     0x9E
-
-#define KS_MAHTR0                      0xA0
-#define KS_MAHTR1                      0xA2
-#define KS_MAHTR2                      0xA4
-#define KS_MAHTR3                      0xA6
-
-#define KS_FCLWR                       0xB0
-#define KS_FCHWR                       0xB2
-#define KS_FCOWR                       0xB4
-
-#define KS_CIDER                       0xC0
-#define CIDER_ID                       0x8870
-#define CIDER_REV_MASK                 (0x7 << 1)
-#define CIDER_REV_SHIFT                        (1)
-#define CIDER_REV_GET(_v)              (((_v) >> 1) & 0x7)
-
-#define KS_CGCR                                0xC6
-#define KS_IACR                                0xC8
-#define IACR_RDEN                      (1 << 12)
-#define IACR_TSEL_MASK                 (0x3 << 10)
-#define IACR_TSEL_SHIFT                        (10)
-#define IACR_TSEL_MIB                  (0x3 << 10)
-#define IACR_ADDR_MASK                 (0x1f << 0)
-#define IACR_ADDR_SHIFT                        (0)
-
-#define KS_IADLR                       0xD0
-#define KS_IAHDR                       0xD2
-
-#define KS_PMECR                       0xD4
-#define PMECR_PME_DELAY                        (1 << 14)
-#define PMECR_PME_POL                  (1 << 12)
-#define PMECR_WOL_WAKEUP               (1 << 11)
-#define PMECR_WOL_MAGICPKT             (1 << 10)
-#define PMECR_WOL_LINKUP               (1 << 9)
-#define PMECR_WOL_ENERGY               (1 << 8)
-#define PMECR_AUTO_WAKE_EN             (1 << 7)
-#define PMECR_WAKEUP_NORMAL            (1 << 6)
-#define PMECR_WKEVT_MASK               (0xf << 2)
-#define PMECR_WKEVT_SHIFT              (2)
-#define PMECR_WKEVT_GET(_v)            (((_v) >> 2) & 0xf)
-#define PMECR_WKEVT_ENERGY             (0x1 << 2)
-#define PMECR_WKEVT_LINK               (0x2 << 2)
-#define PMECR_WKEVT_MAGICPKT           (0x4 << 2)
-#define PMECR_WKEVT_FRAME              (0x8 << 2)
-#define PMECR_PM_MASK                  (0x3 << 0)
-#define PMECR_PM_SHIFT                 (0)
-#define PMECR_PM_NORMAL                        (0x0 << 0)
-#define PMECR_PM_ENERGY                        (0x1 << 0)
-#define PMECR_PM_SOFTDOWN              (0x2 << 0)
-#define PMECR_PM_POWERSAVE             (0x3 << 0)
-
-/* Standard MII PHY data */
-#define KS_P1MBCR                      0xE4
-#define P1MBCR_FORCE_FDX               (1 << 8)
-
-#define KS_P1MBSR                      0xE6
-#define P1MBSR_AN_COMPLETE             (1 << 5)
-#define P1MBSR_AN_CAPABLE              (1 << 3)
-#define P1MBSR_LINK_UP                 (1 << 2)
-
-#define KS_PHY1ILR                     0xE8
-#define KS_PHY1IHR                     0xEA
-#define KS_P1ANAR                      0xEC
-#define KS_P1ANLPR                     0xEE
-
-#define KS_P1SCLMD                     0xF4
-#define P1SCLMD_LEDOFF                 (1 << 15)
-#define P1SCLMD_TXIDS                  (1 << 14)
-#define P1SCLMD_RESTARTAN              (1 << 13)
-#define P1SCLMD_DISAUTOMDIX            (1 << 10)
-#define P1SCLMD_FORCEMDIX              (1 << 9)
-#define P1SCLMD_AUTONEGEN              (1 << 7)
-#define P1SCLMD_FORCE100               (1 << 6)
-#define P1SCLMD_FORCEFDX               (1 << 5)
-#define P1SCLMD_ADV_FLOW               (1 << 4)
-#define P1SCLMD_ADV_100BT_FDX          (1 << 3)
-#define P1SCLMD_ADV_100BT_HDX          (1 << 2)
-#define P1SCLMD_ADV_10BT_FDX           (1 << 1)
-#define P1SCLMD_ADV_10BT_HDX           (1 << 0)
-
-#define KS_P1CR                                0xF6
-#define P1CR_HP_MDIX                   (1 << 15)
-#define P1CR_REV_POL                   (1 << 13)
-#define P1CR_OP_100M                   (1 << 10)
-#define P1CR_OP_FDX                    (1 << 9)
-#define P1CR_OP_MDI                    (1 << 7)
-#define P1CR_AN_DONE                   (1 << 6)
-#define P1CR_LINK_GOOD                 (1 << 5)
-#define P1CR_PNTR_FLOW                 (1 << 4)
-#define P1CR_PNTR_100BT_FDX            (1 << 3)
-#define P1CR_PNTR_100BT_HDX            (1 << 2)
-#define P1CR_PNTR_10BT_FDX             (1 << 1)
-#define P1CR_PNTR_10BT_HDX             (1 << 0)
-
-/* TX Frame control */
-
-#define TXFR_TXIC                      (1 << 15)
-#define TXFR_TXFID_MASK                        (0x3f << 0)
-#define TXFR_TXFID_SHIFT               (0)
-
-#define KS_P1SR                                0xF8
-#define P1SR_HP_MDIX                   (1 << 15)
-#define P1SR_REV_POL                   (1 << 13)
-#define P1SR_OP_100M                   (1 << 10)
-#define P1SR_OP_FDX                    (1 << 9)
-#define P1SR_OP_MDI                    (1 << 7)
-#define P1SR_AN_DONE                   (1 << 6)
-#define P1SR_LINK_GOOD                 (1 << 5)
-#define P1SR_PNTR_FLOW                 (1 << 4)
-#define P1SR_PNTR_100BT_FDX            (1 << 3)
-#define P1SR_PNTR_100BT_HDX            (1 << 2)
-#define P1SR_PNTR_10BT_FDX             (1 << 1)
-#define P1SR_PNTR_10BT_HDX             (1 << 0)
-
 #define        ENUM_BUS_NONE                   0
 #define        ENUM_BUS_8BIT                   1
 #define        ENUM_BUS_16BIT                  2
@@ -1475,7 +1168,7 @@ static void ks_setup(struct ks_net *ks)
        ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
 
        /* Setup Receive Frame Threshold - 1 frame (RXFCTFC) */
-       ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_THRESHOLD_MASK);
+       ks_wrreg16(ks, KS_RXFCTR, 1 & RXFCTR_RXFCT_MASK);
 
        /* Setup RxQ Command Control (RXQCR) */
        ks->rc_rxqcr = RXQCR_CMD_CNTL;
@@ -1488,7 +1181,7 @@ static void ks_setup(struct ks_net *ks)
         */
 
        w = ks_rdreg16(ks, KS_P1MBCR);
-       w &= ~P1MBCR_FORCE_FDX;
+       w &= ~BMCR_FULLDPLX;
        ks_wrreg16(ks, KS_P1MBCR, w);
 
        w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
@@ -1629,7 +1322,7 @@ static int ks8851_probe(struct platform_device *pdev)
        ks_setup_int(ks);
 
        data = ks_rdreg16(ks, KS_OBCR);
-       ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16MA);
+       ks_wrreg16(ks, KS_OBCR, data | OBCR_ODS_16mA);
 
        /* overwriting the default MAC address */
        if (pdev->dev.of_node) {
index 3b0adda7cc9c66769f84a1047a91aaa33c7939c8..a4cd6f2cfb862cb25315823d155b5497e59f5c2f 100644 (file)
@@ -1048,6 +1048,8 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
 
        for (i = 0; i < QLCNIC_NUM_ILB_PKT; i++) {
                skb = netdev_alloc_skb(adapter->netdev, QLCNIC_ILB_PKT_SIZE);
+               if (!skb)
+                       break;
                qlcnic_create_loopback_buff(skb->data, adapter->mac_addr);
                skb_put(skb, QLCNIC_ILB_PKT_SIZE);
                adapter->ahw->diag_cnt = 0;
index cfb67b7465958ec4eb6ae8aa68003a777052310b..58e0ca9093d3d9b4f08fe4bd2a8b3c3db23267a0 100644 (file)
@@ -482,7 +482,7 @@ static void hardware_init(struct net_device *dev)
        write_reg_high(ioaddr, IMR, ISRh_RxErr);
 
        lp->tx_unit_busy = 0;
-    lp->pac_cnt_in_tx_buf = 0;
+       lp->pac_cnt_in_tx_buf = 0;
        lp->saved_tx_size = 0;
 }
 
index c29dde0640784b57a687888c605fb2e52c1b5117..7562ccbbb39af59a2ba0e4078b2f43b2a7376809 100644 (file)
@@ -678,6 +678,7 @@ struct rtl8169_private {
                struct work_struct work;
        } wk;
 
+       unsigned irq_enabled:1;
        unsigned supports_gmii:1;
        dma_addr_t counters_phys_addr;
        struct rtl8169_counters *counters;
@@ -1293,6 +1294,7 @@ static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
 static void rtl_irq_disable(struct rtl8169_private *tp)
 {
        RTL_W16(tp, IntrMask, 0);
+       tp->irq_enabled = 0;
 }
 
 #define RTL_EVENT_NAPI_RX      (RxOK | RxErr)
@@ -1301,6 +1303,7 @@ static void rtl_irq_disable(struct rtl8169_private *tp)
 
 static void rtl_irq_enable(struct rtl8169_private *tp)
 {
+       tp->irq_enabled = 1;
        RTL_W16(tp, IntrMask, tp->irq_mask);
 }
 
@@ -6520,9 +6523,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
 {
        struct rtl8169_private *tp = dev_instance;
        u16 status = RTL_R16(tp, IntrStatus);
-       u16 irq_mask = RTL_R16(tp, IntrMask);
 
-       if (status == 0xffff || !(status & irq_mask))
+       if (!tp->irq_enabled || status == 0xffff || !(status & tp->irq_mask))
                return IRQ_NONE;
 
        if (unlikely(status & SYSErr)) {
@@ -6540,7 +6542,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
        }
 
-       if (status & RTL_EVENT_NAPI) {
+       if (status & (RTL_EVENT_NAPI | LinkChg)) {
                rtl_irq_disable(tp);
                napi_schedule_irqoff(&tp->napi);
        }
index 6073387511f887e4b0cf069041a0b04b759572d1..67f9bb6e941b7ed2467dcfd78a49a0e6c2c8165d 100644 (file)
@@ -730,10 +730,10 @@ static u16 sis900_default_phy(struct net_device * net_dev)
                status = mdio_read(net_dev, phy->phy_addr, MII_STATUS);
 
                /* Link ON & Not select default PHY & not ghost PHY */
-                if ((status & MII_STAT_LINK) && !default_phy &&
-                                       (phy->phy_types != UNKNOWN))
-                       default_phy = phy;
-                else {
+               if ((status & MII_STAT_LINK) && !default_phy &&
+                   (phy->phy_types != UNKNOWN)) {
+                       default_phy = phy;
+               } else {
                        status = mdio_read(net_dev, phy->phy_addr, MII_CONTROL);
                        mdio_write(net_dev, phy->phy_addr, MII_CONTROL,
                                status | MII_CNTL_AUTO | MII_CNTL_ISOLATE);
@@ -741,7 +741,7 @@ static u16 sis900_default_phy(struct net_device * net_dev)
                                phy_home = phy;
                        else if(phy->phy_types == LAN)
                                phy_lan = phy;
-                }
+               }
        }
 
        if (!default_phy && phy_home)
index d8c5bc4122195d73f7150f2775797cc6ba9a3393..4d9bcb4d0378319d2d71d61a6e751ab9d141083d 100644 (file)
@@ -59,7 +59,7 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
-                               STMMAC_RING_MODE, 1, false, skb->len);
+                               STMMAC_RING_MODE, 0, false, skb->len);
                tx_q->tx_skbuff[entry] = NULL;
                entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
 
@@ -79,7 +79,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
 
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
-                               STMMAC_RING_MODE, 1, true, skb->len);
+                               STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+                               skb->len);
        } else {
                des2 = dma_map_single(priv->device, skb->data,
                                      nopaged_len, DMA_TO_DEVICE);
@@ -91,7 +92,8 @@ static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
                tx_q->tx_skbuff_dma[entry].is_jumbo = true;
                desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
                stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
-                               STMMAC_RING_MODE, 1, true, skb->len);
+                               STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+                               skb->len);
        }
 
        tx_q->cur_tx = entry;
@@ -111,10 +113,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
 
 static void refill_desc3(void *priv_ptr, struct dma_desc *p)
 {
-       struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
+       struct stmmac_rx_queue *rx_q = priv_ptr;
+       struct stmmac_priv *priv = rx_q->priv_data;
 
        /* Fill DES3 in case of RING mode */
-       if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+       if (priv->dma_buf_sz == BUF_SIZE_16KiB)
                p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
 }
 
index 97c5e1aad88f979208c80efe30d19cfcc5ba05e6..6a2e1031a62ae3c4d16f7f09f4d09481ccfa325d 100644 (file)
@@ -3216,14 +3216,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
                stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
                                csum_insertion, priv->mode, 1, last_segment,
                                skb->len);
-
-               /* The own bit must be the latest setting done when prepare the
-                * descriptor and then barrier is needed to make sure that
-                * all is coherent before granting the DMA engine.
-                */
-               wmb();
+       } else {
+               stmmac_set_tx_owner(priv, first);
        }
 
+       /* The own bit must be the latest setting done when prepare the
+        * descriptor and then barrier is needed to make sure that
+        * all is coherent before granting the DMA engine.
+        */
+       wmb();
+
        netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
 
        stmmac_enable_dma_transmission(priv, priv->ioaddr);
index 5174d318901e0f74aa6fa6c7e12b29bfe2b362c7..0a920c5936b24e1a14a8625a63f9bf2eed019ccb 100644 (file)
@@ -3657,12 +3657,16 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
 
        ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
                                gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
-       if (ret)
+       if (ret) {
+               of_node_put(interfaces);
                return ret;
+       }
 
        ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
-       if (ret)
+       if (ret) {
+               of_node_put(interfaces);
                return ret;
+       }
 
        /* Create network interfaces */
        INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
index ec7e7ec24ff910f7db36f9da1bbcd8c04ccf949a..4041c75997ba5ed52803b69b4c2e19bf41dee8e8 100644 (file)
@@ -1575,12 +1575,14 @@ static int axienet_probe(struct platform_device *pdev)
        ret = of_address_to_resource(np, 0, &dmares);
        if (ret) {
                dev_err(&pdev->dev, "unable to get DMA resource\n");
+               of_node_put(np);
                goto free_netdev;
        }
        lp->dma_regs = devm_ioremap_resource(&pdev->dev, &dmares);
        if (IS_ERR(lp->dma_regs)) {
                dev_err(&pdev->dev, "could not map DMA regs\n");
                ret = PTR_ERR(lp->dma_regs);
+               of_node_put(np);
                goto free_netdev;
        }
        lp->rx_irq = irq_of_parse_and_map(np, 1);
index cd1d8faccca5fb36b488312d734d5e42cebb7b1a..cd6b95e673a58319a2f0ea0ed15445cc1782435f 100644 (file)
@@ -1268,6 +1268,10 @@ static int adf7242_probe(struct spi_device *spi)
        INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
        lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
                                             WQ_MEM_RECLAIM);
+       if (unlikely(!lp->wqueue)) {
+               ret = -ENOMEM;
+               goto err_hw_init;
+       }
 
        ret = adf7242_hw_init(lp);
        if (ret)
index b6743f03dce000578b65bf9a8afddd3c2613d628..3b88846de31b18236e423c3b941f69537ab3bcfa 100644 (file)
@@ -324,7 +324,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info)
                        goto out_err;
                }
 
-               genlmsg_reply(skb, info);
+               res = genlmsg_reply(skb, info);
                break;
        }
 
index 071869db44cf3e0b33cc75b3b4fd285212bf1c9d..520657945b8279debe7583d33c31c095899ddb7a 100644 (file)
@@ -7,6 +7,8 @@ menuconfig MDIO_DEVICE
        help
          MDIO devices and driver infrastructure code.
 
+if MDIO_DEVICE
+
 config MDIO_BUS
        tristate
        default m if PHYLIB=m
@@ -179,6 +181,7 @@ config MDIO_XGENE
          APM X-Gene SoC's.
 
 endif
+endif
 
 config PHYLINK
        tristate
index 9605d4fe540b1e4ed894d4ff5ab4f40e5d9caf16..cb86a3e90c7de3ff41a7d821c135aec8dad9eef0 100644 (file)
@@ -323,6 +323,19 @@ static int bcm54xx_config_init(struct phy_device *phydev)
 
        bcm54xx_phydsp_config(phydev);
 
+       /* Encode link speed into LED1 and LED3 pair (green/amber).
+        * Also flash these two LEDs on activity. This means configuring
+        * them for MULTICOLOR and encoding link/activity into them.
+        */
+       val = BCM5482_SHD_LEDS1_LED1(BCM_LED_SRC_MULTICOLOR1) |
+               BCM5482_SHD_LEDS1_LED3(BCM_LED_SRC_MULTICOLOR1);
+       bcm_phy_write_shadow(phydev, BCM5482_SHD_LEDS1, val);
+
+       val = BCM_LED_MULTICOLOR_IN_PHASE |
+               BCM5482_SHD_LEDS1_LED1(BCM_LED_MULTICOLOR_LINK_ACT) |
+               BCM5482_SHD_LEDS1_LED3(BCM_LED_MULTICOLOR_LINK_ACT);
+       bcm_phy_write_exp(phydev, BCM_EXP_MULTICOLOR, val);
+
        return 0;
 }
 
index bbd8c22067f3d2c4975757febf0658ddb1c3e8f7..97d45bd5b38e382b678dc3ce814f813cf045d7d6 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/netdevice.h>
 
 #define DP83822_PHY_ID         0x2000a240
+#define DP83825I_PHY_ID                0x2000a150
+
 #define DP83822_DEVADDR                0x1f
 
 #define MII_DP83822_PHYSCR     0x11
@@ -304,26 +306,30 @@ static int dp83822_resume(struct phy_device *phydev)
        return 0;
 }
 
+#define DP83822_PHY_DRIVER(_id, _name)                         \
+       {                                                       \
+               PHY_ID_MATCH_MODEL(_id),                        \
+               .name           = (_name),                      \
+               .features       = PHY_BASIC_FEATURES,           \
+               .soft_reset     = dp83822_phy_reset,            \
+               .config_init    = dp83822_config_init,          \
+               .get_wol = dp83822_get_wol,                     \
+               .set_wol = dp83822_set_wol,                     \
+               .ack_interrupt = dp83822_ack_interrupt,         \
+               .config_intr = dp83822_config_intr,             \
+               .suspend = dp83822_suspend,                     \
+               .resume = dp83822_resume,                       \
+       }
+
 static struct phy_driver dp83822_driver[] = {
-       {
-               .phy_id = DP83822_PHY_ID,
-               .phy_id_mask = 0xfffffff0,
-               .name = "TI DP83822",
-               .features = PHY_BASIC_FEATURES,
-               .config_init = dp83822_config_init,
-               .soft_reset = dp83822_phy_reset,
-               .get_wol = dp83822_get_wol,
-               .set_wol = dp83822_set_wol,
-               .ack_interrupt = dp83822_ack_interrupt,
-               .config_intr = dp83822_config_intr,
-               .suspend = dp83822_suspend,
-               .resume = dp83822_resume,
-        },
+       DP83822_PHY_DRIVER(DP83822_PHY_ID, "TI DP83822"),
+       DP83822_PHY_DRIVER(DP83825I_PHY_ID, "TI DP83825I"),
 };
 module_phy_driver(dp83822_driver);
 
 static struct mdio_device_id __maybe_unused dp83822_tbl[] = {
        { DP83822_PHY_ID, 0xfffffff0 },
+       { DP83825I_PHY_ID, 0xfffffff0 },
        { },
 };
 MODULE_DEVICE_TABLE(mdio, dp83822_tbl);
index a238388eb1a5e09f138f5a63d627cc25f076da29..0eec2913c289b83a77a238aca2da64e558378336 100644 (file)
@@ -201,6 +201,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
 static int meson_gxl_config_intr(struct phy_device *phydev)
 {
        u16 val;
+       int ret;
 
        if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
                val = INTSRC_ANEG_PR
@@ -213,6 +214,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
                val = 0;
        }
 
+       /* Ack any pending IRQ */
+       ret = meson_gxl_ack_interrupt(phydev);
+       if (ret)
+               return ret;
+
        return phy_write(phydev, INTSRC_MASK, val);
 }
 
index 49fdd1ee798e4418f5145b00ad6573525eed0f82..77068c545de0d33607981e7a94a32bf7ed1ff34c 100644 (file)
@@ -1831,7 +1831,7 @@ int genphy_soft_reset(struct phy_device *phydev)
 {
        int ret;
 
-       ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+       ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
        if (ret < 0)
                return ret;
 
index 1d68921723dc08532b3f5321a52865076ad66336..e9ca1c088d0b11611e4d80268ced3806db05cffb 100644 (file)
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        int skb_xdp = 1;
        bool frags = tun_napi_frags_enabled(tfile);
 
-       if (!(tun->dev->flags & IFF_UP))
-               return -EIO;
-
        if (!(tun->flags & IFF_NO_PI)) {
                if (len < sizeof(pi))
                        return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                        err = skb_copy_datagram_from_iter(skb, 0, from, len);
 
                if (err) {
+                       err = -EFAULT;
+drop:
                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
                        kfree_skb(skb);
                        if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                                mutex_unlock(&tfile->napi_mutex);
                        }
 
-                       return -EFAULT;
+                       return err;
                }
        }
 
@@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
            !tfile->detached)
                rxhash = __skb_get_hash_symmetric(skb);
 
+       rcu_read_lock();
+       if (unlikely(!(tun->dev->flags & IFF_UP))) {
+               err = -EIO;
+               rcu_read_unlock();
+               goto drop;
+       }
+
        if (frags) {
                /* Exercise flow dissector code path. */
                u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                if (unlikely(headlen > skb_headlen(skb))) {
                        this_cpu_inc(tun->pcpu_stats->rx_dropped);
                        napi_free_frags(&tfile->napi);
+                       rcu_read_unlock();
                        mutex_unlock(&tfile->napi_mutex);
                        WARN_ON(1);
                        return -ENOMEM;
@@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
        } else {
                netif_rx_ni(skb);
        }
+       rcu_read_unlock();
 
        stats = get_cpu_ptr(tun->pcpu_stats);
        u64_stats_update_begin(&stats->syncp);
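
The tun change above routes both the copy failure and the late IFF_UP check through a single drop: label that bumps the drop counter and frees the buffer. A rough userspace sketch of that single-exit pattern, with invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned long rx_dropped;

static int copy_in(char *dst, const char *src, size_t len)
{
        if (!src)
                return -1;      /* stand-in for a faulting user buffer */
        memcpy(dst, src, len);
        return 0;
}

static int rx_one(const char *data, size_t len, int link_up)
{
        char *buf;
        int err;

        buf = malloc(len);
        if (!buf)
                return -ENOMEM;

        if (copy_in(buf, data, len)) {
                err = -EFAULT;
                goto drop;
        }

        if (!link_up) {         /* the IFF_UP-style check, done late */
                err = -EIO;
                goto drop;
        }

        free(buf);              /* "deliver" and release */
        return 0;

drop:
        rx_dropped++;           /* one place counts and frees */
        free(buf);
        return err;
}

int main(void)
{
        printf("%d %d dropped=%lu\n",
               rx_one("hi", 2, 1), rx_one("hi", 2, 0), rx_dropped);
        return 0;
}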
index 820a2fe7d027733eb1c9ccc54aa504a4088a6600..aff995be2a318796a832e19c5c3c3e3cfc5c9efd 100644 (file)
@@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
        .tx_fixup       = aqc111_tx_fixup,
 };
 
+static const struct driver_info qnap_info = {
+       .description    = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
+       .bind           = aqc111_bind,
+       .unbind         = aqc111_unbind,
+       .status         = aqc111_status,
+       .link_reset     = aqc111_link_reset,
+       .reset          = aqc111_reset,
+       .stop           = aqc111_stop,
+       .flags          = FLAG_ETHER | FLAG_FRAMING_AX |
+                         FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
+       .rx_fixup       = aqc111_rx_fixup,
+       .tx_fixup       = aqc111_tx_fixup,
+};
+
 static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
@@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
        {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
        {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
        {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
+       {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
        { },/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
index 5512a1038721459a727326bb8823e16c0886b7f1..3e9b2c319e45256865415da43386031c556d9e2b 100644 (file)
@@ -851,6 +851,14 @@ static const struct usb_device_id  products[] = {
        .driver_info = 0,
 },
 
+/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
+{
+       USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
+                                     USB_CDC_SUBCLASS_ETHERNET,
+                                     USB_CDC_PROTO_NONE),
+       .driver_info = 0,
+},
+
 /* WHITELIST!!!
  *
  * CDC Ether uses two interfaces, not necessarily consecutive.
index 077f1b9f27616d34e966fc3760b40fd1f6a5915a..d76dfed8d9bbef1d1ae8470686e417af2e531ac8 100644 (file)
@@ -4335,10 +4335,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
                /* If vxlan->dev is in the same netns, it has already been added
                 * to the list by the previous loop.
                 */
-               if (!net_eq(dev_net(vxlan->dev), net)) {
-                       gro_cells_destroy(&vxlan->gro_cells);
+               if (!net_eq(dev_net(vxlan->dev), net))
                        unregister_netdevice_queue(vxlan->dev, head);
-               }
        }
 
        for (h = 0; h < PORT_HASH_SIZE; ++h)
index e9822a3ec373929ff520c9dea90c51194adcb69b..94132cfd1f56241b7e9548466c7050541c87e4c5 100644 (file)
@@ -460,9 +460,7 @@ static int iwl_mvm_ftm_range_resp_valid(struct iwl_mvm *mvm, u8 request_id,
 static void iwl_mvm_debug_range_resp(struct iwl_mvm *mvm, u8 index,
                                     struct cfg80211_pmsr_result *res)
 {
-       s64 rtt_avg = res->ftm.rtt_avg * 100;
-
-       do_div(rtt_avg, 6666);
+       s64 rtt_avg = div_s64(res->ftm.rtt_avg * 100, 6666);
 
        IWL_DEBUG_INFO(mvm, "entry %d\n", index);
        IWL_DEBUG_INFO(mvm, "\tstatus: %d\n", res->status);
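
do_div() expects an unsigned 64-bit dividend, so the signed RTT average is now divided with div_s64() instead. A small userspace sketch of the same computation; the helper and the sample value are placeholders:

#include <inttypes.h>
#include <stdio.h>

static int64_t div_s64_example(int64_t dividend, int32_t divisor)
{
        /* the kernel helper exists because 32-bit targets have no native
         * 64-bit signed divide; in userspace the plain operator is enough */
        return dividend / divisor;
}

int main(void)
{
        int64_t rtt_avg_raw = -123456;          /* made-up negative sample */
        int64_t rtt_avg = div_s64_example(rtt_avg_raw * 100, 6666);

        printf("%" PRId64 "\n", rtt_avg);
        return 0;
}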
index 6eedc0ec76616cc55afec8b98f55db1ec27e540b..76629b98c78d78d81d7c61e0cd4771ba8d5f3c39 100644 (file)
@@ -130,6 +130,8 @@ mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
 static void
 mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
 {
+       iowrite32(q->desc_dma, &q->regs->desc_base);
+       iowrite32(q->ndesc, &q->regs->ring_size);
        q->head = ioread32(&q->regs->dma_idx);
        q->tail = q->head;
        iowrite32(q->head, &q->regs->cpu_idx);
@@ -180,7 +182,10 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
        else
                mt76_dma_sync_idx(dev, q);
 
-       wake = wake && qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+       wake = wake && q->stopped &&
+              qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+       if (wake)
+               q->stopped = false;
 
        if (!q->queued)
                wake_up(&dev->tx_wait);
index a033745adb2f7a738ac576aafd41aa931fcc5ea6..316167404729fdcd8322c71bd5626785822ce90d 100644 (file)
@@ -679,19 +679,15 @@ out:
        return ret;
 }
 
-static void
-mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
-               struct ieee80211_sta *sta)
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta)
 {
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-       int idx = wcid->idx;
-       int i;
+       int i, idx = wcid->idx;
 
        rcu_assign_pointer(dev->wcid[idx], NULL);
        synchronize_rcu();
 
-       mutex_lock(&dev->mutex);
-
        if (dev->drv->sta_remove)
                dev->drv->sta_remove(dev, vif, sta);
 
@@ -699,7 +695,15 @@ mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
        for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
                mt76_txq_remove(dev, sta->txq[i]);
        mt76_wcid_free(dev->wcid_mask, idx);
+}
+EXPORT_SYMBOL_GPL(__mt76_sta_remove);
 
+static void
+mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+               struct ieee80211_sta *sta)
+{
+       mutex_lock(&dev->mutex);
+       __mt76_sta_remove(dev, vif, sta);
        mutex_unlock(&dev->mutex);
 }
 
index 5dfb0601f1015c01251d409070ba64bda516db26..bcbfd3c4a44b68199dbc53e0e5ea498cc9e78e89 100644 (file)
@@ -126,6 +126,7 @@ struct mt76_queue {
        int ndesc;
        int queued;
        int buf_size;
+       bool stopped;
 
        u8 buf_offset;
        u8 hw_idx;
@@ -143,6 +144,7 @@ struct mt76_mcu_ops {
                         const struct mt76_reg_pair *rp, int len);
        int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
                         struct mt76_reg_pair *rp, int len);
+       int (*mcu_restart)(struct mt76_dev *dev);
 };
 
 struct mt76_queue_ops {
@@ -693,6 +695,8 @@ int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
                   struct ieee80211_sta *sta,
                   enum ieee80211_sta_state old_state,
                   enum ieee80211_sta_state new_state);
+void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
+                      struct ieee80211_sta *sta);
 
 struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb);
 
index afcd86f735b40e1a508d7fba5c4d38429906c9af..4dcb465095d19e9a0fe88c15a757a59fec802d87 100644 (file)
@@ -135,8 +135,7 @@ void mt7603_pre_tbtt_tasklet(unsigned long arg)
 
 out:
        mt76_queue_tx_cleanup(dev, MT_TXQ_BEACON, false);
-       if (dev->mt76.q_tx[MT_TXQ_BEACON].queued >
-           __sw_hweight8(dev->beacon_mask))
+       if (dev->mt76.q_tx[MT_TXQ_BEACON].queued > hweight8(dev->beacon_mask))
                dev->beacon_check++;
 }
 
index d69e82c66ab29fb8e8c645de9270ab47a066dd49..b3ae0aaea62a15b51b1ed2c23bc8857f88dec8c9 100644 (file)
@@ -27,12 +27,16 @@ static void
 mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
 {
        __le32 *txd = (__le32 *)skb->data;
+       struct ieee80211_hdr *hdr;
+       struct ieee80211_sta *sta;
        struct mt7603_sta *msta;
        struct mt76_wcid *wcid;
+       void *priv;
        int idx;
        u32 val;
+       u8 tid;
 
-       if (skb->len < sizeof(MT_TXD_SIZE) + sizeof(struct ieee80211_hdr))
+       if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
                goto free;
 
        val = le32_to_cpu(txd[1]);
@@ -46,10 +50,19 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
        if (!wcid)
                goto free;
 
-       msta = container_of(wcid, struct mt7603_sta, wcid);
+       priv = msta = container_of(wcid, struct mt7603_sta, wcid);
        val = le32_to_cpu(txd[0]);
        skb_set_queue_mapping(skb, FIELD_GET(MT_TXD0_Q_IDX, val));
 
+       val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+       val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
+       txd[0] = cpu_to_le32(val);
+
+       sta = container_of(priv, struct ieee80211_sta, drv_priv);
+       hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+       tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
+       ieee80211_sta_set_buffered(sta, tid, true);
+
        spin_lock_bh(&dev->ps_lock);
        __skb_queue_tail(&msta->psq, skb);
        if (skb_queue_len(&msta->psq) >= 64) {
index 15cc8f33b34d656d86c745cd10950d2253e864fc..d54dda67d036c19cffce6bc30765c39dc93ee326 100644 (file)
@@ -112,7 +112,7 @@ static void
 mt7603_phy_init(struct mt7603_dev *dev)
 {
        int rx_chains = dev->mt76.antenna_mask;
-       int tx_chains = __sw_hweight8(rx_chains) - 1;
+       int tx_chains = hweight8(rx_chains) - 1;
 
        mt76_rmw(dev, MT_WF_RMAC_RMCR,
                 (MT_WF_RMAC_RMCR_SMPS_MODE |
index 0a0115861b51e500c777daeb805685286ebb4f5a..5e31d7da96fc88e5fab246c61ec1d37a328a8700 100644 (file)
@@ -1072,7 +1072,7 @@ out:
        case MT_PHY_TYPE_HT:
                final_rate_flags |= IEEE80211_TX_RC_MCS;
                final_rate &= GENMASK(5, 0);
-               if (i > 15)
+               if (final_rate > 15)
                        return false;
                break;
        default:
index b10775ed92e65ff72036dc314c061437315c009c..cc0fe0933b2d8043e622f1b513817b6528bbcaae 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/pci.h>
 #include <linux/module.h>
 #include "mt7603.h"
+#include "mac.h"
 #include "eeprom.h"
 
 static int
@@ -385,6 +386,15 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
        mt7603_ps_tx_list(dev, &list);
 }
 
+static void
+mt7603_ps_set_more_data(struct sk_buff *skb)
+{
+       struct ieee80211_hdr *hdr;
+
+       hdr = (struct ieee80211_hdr *) &skb->data[MT_TXD_SIZE];
+       hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+}
+
 static void
 mt7603_release_buffered_frames(struct ieee80211_hw *hw,
                               struct ieee80211_sta *sta,
@@ -399,6 +409,8 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
 
        __skb_queue_head_init(&list);
 
+       mt7603_wtbl_set_ps(dev, msta, false);
+
        spin_lock_bh(&dev->ps_lock);
        skb_queue_walk_safe(&msta->psq, skb, tmp) {
                if (!nframes)
@@ -409,11 +421,15 @@ mt7603_release_buffered_frames(struct ieee80211_hw *hw,
 
                skb_set_queue_mapping(skb, MT_TXQ_PSD);
                __skb_unlink(skb, &msta->psq);
+               mt7603_ps_set_more_data(skb);
                __skb_queue_tail(&list, skb);
                nframes--;
        }
        spin_unlock_bh(&dev->ps_lock);
 
+       if (!skb_queue_empty(&list))
+               ieee80211_sta_eosp(sta);
+
        mt7603_ps_tx_list(dev, &list);
 
        if (nframes)
index 4b0713f1fd5e3da78c83efa05b8f14aabd2fbfc4..d06905ea8cc63f2b9851eb679655007c1e8f7486 100644 (file)
@@ -433,7 +433,7 @@ int mt7603_mcu_set_channel(struct mt7603_dev *dev)
 {
        struct cfg80211_chan_def *chandef = &dev->mt76.chandef;
        struct ieee80211_hw *hw = mt76_hw(dev);
-       int n_chains = __sw_hweight8(dev->mt76.antenna_mask);
+       int n_chains = hweight8(dev->mt76.antenna_mask);
        struct {
                u8 control_chan;
                u8 center_chan;
index e13fea80d970d228ef7b99faa69e489f017ffb49..b920be1f5718b75d2f7374b6bbe6a533ebdec9c2 100644 (file)
@@ -23,9 +23,9 @@ mt76_wmac_probe(struct platform_device *pdev)
        }
 
        mem_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!mem_base) {
+       if (IS_ERR(mem_base)) {
                dev_err(&pdev->dev, "Failed to get memory resource\n");
-               return -EINVAL;
+               return PTR_ERR(mem_base);
        }
 
        mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7603_ops,
index 0290ba5869a5182ca283db62629d38a2e37b21ef..736f81752b5b488518e1393e6200b67be7bfa87c 100644 (file)
@@ -46,7 +46,7 @@ static const struct mt76_reg_pair common_mac_reg_table[] = {
        { MT_MM20_PROT_CFG,             0x01742004 },
        { MT_MM40_PROT_CFG,             0x03f42084 },
        { MT_TXOP_CTRL_CFG,             0x0000583f },
-       { MT_TX_RTS_CFG,                0x00092b20 },
+       { MT_TX_RTS_CFG,                0x00ffff20 },
        { MT_EXP_ACK_TIME,              0x002400ca },
        { MT_TXOP_HLDR_ET,              0x00000002 },
        { MT_XIFS_TIME_CFG,             0x33a41010 },
index 91718647da0285e40eda86aa97dc98559592732d..e5a06f74a6f701419f703844f4e73fba6307a627 100644 (file)
@@ -229,7 +229,7 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
        struct mt76x02_dev *dev;
        struct mt76_dev *mdev;
-       u32 asic_rev, mac_rev;
+       u32 mac_rev;
        int ret;
 
        mdev = mt76_alloc_device(&usb_intf->dev, sizeof(*dev), &mt76x0u_ops,
@@ -262,10 +262,14 @@ static int mt76x0u_probe(struct usb_interface *usb_intf,
                goto err;
        }
 
-       asic_rev = mt76_rr(dev, MT_ASIC_VERSION);
+       mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
        mac_rev = mt76_rr(dev, MT_MAC_CSR0);
        dev_info(mdev->dev, "ASIC revision: %08x MAC revision: %08x\n",
-                asic_rev, mac_rev);
+                mdev->rev, mac_rev);
+       if (!is_mt76x0(dev)) {
+               ret = -ENODEV;
+               goto err;
+       }
 
        /* Note: vendor driver skips this check for MT76X0U */
        if (!(mt76_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
index 6915cce5def9342935784c888c477417e26b5c69..07061eb4d1e1b3ef97b7131791af79a0962b6360 100644 (file)
@@ -51,6 +51,7 @@ struct mt76x02_calibration {
        u16 false_cca;
        s8 avg_rssi_all;
        s8 agc_gain_adjust;
+       s8 agc_lowest_gain;
        s8 low_gain;
 
        s8 temp_vco;
@@ -114,8 +115,11 @@ struct mt76x02_dev {
        struct mt76x02_dfs_pattern_detector dfs_pd;
 
        /* edcca monitor */
+       unsigned long ed_trigger_timeout;
        bool ed_tx_blocked;
        bool ed_monitor;
+       u8 ed_monitor_enabled;
+       u8 ed_monitor_learning;
        u8 ed_trigger;
        u8 ed_silent;
        ktime_t ed_time;
@@ -188,6 +192,13 @@ void mt76x02_mac_start(struct mt76x02_dev *dev);
 
 void mt76x02_init_debugfs(struct mt76x02_dev *dev);
 
+static inline bool is_mt76x0(struct mt76x02_dev *dev)
+{
+       return mt76_chip(&dev->mt76) == 0x7610 ||
+              mt76_chip(&dev->mt76) == 0x7630 ||
+              mt76_chip(&dev->mt76) == 0x7650;
+}
+
 static inline bool is_mt76x2(struct mt76x02_dev *dev)
 {
        return mt76_chip(&dev->mt76) == 0x7612 ||
index 7580c5c986ffe5226f4c91feccaf73cd445aabc0..b1d6fd4861e3236b9cb255277c01b9a2f01b1fca 100644 (file)
@@ -116,6 +116,32 @@ static int read_agc(struct seq_file *file, void *data)
        return 0;
 }
 
+static int
+mt76_edcca_set(void *data, u64 val)
+{
+       struct mt76x02_dev *dev = data;
+       enum nl80211_dfs_regions region = dev->dfs_pd.region;
+
+       dev->ed_monitor_enabled = !!val;
+       dev->ed_monitor = dev->ed_monitor_enabled &&
+                         region == NL80211_DFS_ETSI;
+       mt76x02_edcca_init(dev, true);
+
+       return 0;
+}
+
+static int
+mt76_edcca_get(void *data, u64 *val)
+{
+       struct mt76x02_dev *dev = data;
+
+       *val = dev->ed_monitor_enabled;
+       return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_edcca, mt76_edcca_get, mt76_edcca_set,
+                        "%lld\n");
+
 void mt76x02_init_debugfs(struct mt76x02_dev *dev)
 {
        struct dentry *dir;
@@ -127,6 +153,7 @@ void mt76x02_init_debugfs(struct mt76x02_dev *dev)
        debugfs_create_u8("temperature", 0400, dir, &dev->cal.temp);
        debugfs_create_bool("tpc", 0600, dir, &dev->enable_tpc);
 
+       debugfs_create_file("edcca", 0400, dir, dev, &fops_edcca);
        debugfs_create_file("ampdu_stat", 0400, dir, dev, &fops_ampdu_stat);
        debugfs_create_file("dfs_stats", 0400, dir, dev, &fops_dfs_stat);
        debugfs_create_devm_seqfile(dev->mt76.dev, "txpower", dir,
index e4649103efd49ab5d77c28f3f01d1aac1c81fc3b..17d12d212d1ba1d0a3eb8f65b9e0ecef42908406 100644 (file)
@@ -885,7 +885,8 @@ mt76x02_dfs_set_domain(struct mt76x02_dev *dev,
        if (dfs_pd->region != region) {
                tasklet_disable(&dfs_pd->dfs_tasklet);
 
-               dev->ed_monitor = region == NL80211_DFS_ETSI;
+               dev->ed_monitor = dev->ed_monitor_enabled &&
+                                 region == NL80211_DFS_ETSI;
                mt76x02_edcca_init(dev, true);
 
                dfs_pd->region = region;
index 91ff6598eccfb55dfb0fe8e96b4edf6e7941f59f..9ed231abe91676119d751b06cfa995a7f5dd716c 100644 (file)
@@ -67,12 +67,39 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
 }
 EXPORT_SYMBOL_GPL(mt76x02_mac_shared_key_setup);
 
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+                             struct ieee80211_key_conf *key)
+{
+       enum mt76x02_cipher_type cipher;
+       u8 key_data[32];
+       u32 iv, eiv;
+       u64 pn;
+
+       cipher = mt76x02_mac_get_key_info(key, key_data);
+       iv = mt76_rr(dev, MT_WCID_IV(idx));
+       eiv = mt76_rr(dev, MT_WCID_IV(idx) + 4);
+
+       pn = (u64)eiv << 16;
+       if (cipher == MT_CIPHER_TKIP) {
+               pn |= (iv >> 16) & 0xff;
+               pn |= (iv & 0xff) << 8;
+       } else if (cipher >= MT_CIPHER_AES_CCMP) {
+               pn |= iv & 0xffff;
+       } else {
+               return;
+       }
+
+       atomic64_set(&key->tx_pn, pn);
+}
+
+
 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                             struct ieee80211_key_conf *key)
 {
        enum mt76x02_cipher_type cipher;
        u8 key_data[32];
        u8 iv_data[8];
+       u64 pn;
 
        cipher = mt76x02_mac_get_key_info(key, key_data);
        if (cipher == MT_CIPHER_NONE && key)
@@ -85,9 +112,22 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
        if (key) {
                mt76_rmw_field(dev, MT_WCID_ATTR(idx), MT_WCID_ATTR_PAIRWISE,
                               !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
+
+               pn = atomic64_read(&key->tx_pn);
+
                iv_data[3] = key->keyidx << 6;
-               if (cipher >= MT_CIPHER_TKIP)
+               if (cipher >= MT_CIPHER_TKIP) {
                        iv_data[3] |= 0x20;
+                       put_unaligned_le32(pn >> 16, &iv_data[4]);
+               }
+
+               if (cipher == MT_CIPHER_TKIP) {
+                       iv_data[0] = (pn >> 8) & 0xff;
+                       iv_data[1] = (iv_data[0] | 0x20) & 0x7f;
+                       iv_data[2] = pn & 0xff;
+               } else if (cipher >= MT_CIPHER_AES_CCMP) {
+                       put_unaligned_le16((pn & 0xffff), &iv_data[0]);
+               }
        }
 
        mt76_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data));
@@ -920,6 +960,7 @@ void mt76x02_edcca_init(struct mt76x02_dev *dev, bool enable)
                }
        }
        mt76x02_edcca_tx_enable(dev, true);
+       dev->ed_monitor_learning = true;
 
        /* clear previous CCA timer value */
        mt76_rr(dev, MT_ED_CCA_TIMER);
@@ -929,6 +970,10 @@ EXPORT_SYMBOL_GPL(mt76x02_edcca_init);
 
 #define MT_EDCCA_TH            92
 #define MT_EDCCA_BLOCK_TH      2
+#define MT_EDCCA_LEARN_TH      50
+#define MT_EDCCA_LEARN_CCA     180
+#define MT_EDCCA_LEARN_TIMEOUT (20 * HZ)
+
 static void mt76x02_edcca_check(struct mt76x02_dev *dev)
 {
        ktime_t cur_time;
@@ -951,11 +996,23 @@ static void mt76x02_edcca_check(struct mt76x02_dev *dev)
                dev->ed_trigger = 0;
        }
 
-       if (dev->ed_trigger > MT_EDCCA_BLOCK_TH &&
-           !dev->ed_tx_blocked)
+       if (dev->cal.agc_lowest_gain &&
+           dev->cal.false_cca > MT_EDCCA_LEARN_CCA &&
+           dev->ed_trigger > MT_EDCCA_LEARN_TH) {
+               dev->ed_monitor_learning = false;
+               dev->ed_trigger_timeout = jiffies + 20 * HZ;
+       } else if (!dev->ed_monitor_learning &&
+                  time_is_after_jiffies(dev->ed_trigger_timeout)) {
+               dev->ed_monitor_learning = true;
+               mt76x02_edcca_tx_enable(dev, true);
+       }
+
+       if (dev->ed_monitor_learning)
+               return;
+
+       if (dev->ed_trigger > MT_EDCCA_BLOCK_TH && !dev->ed_tx_blocked)
                mt76x02_edcca_tx_enable(dev, false);
-       else if (dev->ed_silent > MT_EDCCA_BLOCK_TH &&
-                dev->ed_tx_blocked)
+       else if (dev->ed_silent > MT_EDCCA_BLOCK_TH && dev->ed_tx_blocked)
                mt76x02_edcca_tx_enable(dev, true);
 }
 
index 6b1f25d2f64c3a931fbf1bd4c695984f9d5bf4ff..caeeef96c42faf74ccf9bb15cc60f4ffe865e9f4 100644 (file)
@@ -177,6 +177,8 @@ int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx,
                                 u8 key_idx, struct ieee80211_key_conf *key);
 int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx,
                             struct ieee80211_key_conf *key);
+void mt76x02_mac_wcid_sync_pn(struct mt76x02_dev *dev, u8 idx,
+                             struct ieee80211_key_conf *key);
 void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx,
                            u8 *mac);
 void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop);
index 1229f19f2b02c68b4144662e8097333d5133f3ac..daaed1220147ea914c32f13f32f04bd5a360d7fb 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/irq.h>
 
 #include "mt76x02.h"
+#include "mt76x02_mcu.h"
 #include "mt76x02_trace.h"
 
 struct beacon_bc_data {
@@ -418,9 +419,66 @@ static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
        return i < 4;
 }
 
+static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+                            struct ieee80211_sta *sta,
+                            struct ieee80211_key_conf *key, void *data)
+{
+       struct mt76x02_dev *dev = hw->priv;
+       struct mt76_wcid *wcid;
+
+       if (!sta)
+               return;
+
+       wcid = (struct mt76_wcid *) sta->drv_priv;
+
+       if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
+               return;
+
+       mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
+}
+
+static void mt76x02_reset_state(struct mt76x02_dev *dev)
+{
+       int i;
+
+       lockdep_assert_held(&dev->mt76.mutex);
+
+       clear_bit(MT76_STATE_RUNNING, &dev->mt76.state);
+
+       rcu_read_lock();
+       ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
+       rcu_read_unlock();
+
+       for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) {
+               struct ieee80211_sta *sta;
+               struct ieee80211_vif *vif;
+               struct mt76x02_sta *msta;
+               struct mt76_wcid *wcid;
+               void *priv;
+
+               wcid = rcu_dereference_protected(dev->mt76.wcid[i],
+                                       lockdep_is_held(&dev->mt76.mutex));
+               if (!wcid)
+                       continue;
+
+               priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
+               sta = container_of(priv, struct ieee80211_sta, drv_priv);
+
+               priv = msta->vif;
+               vif = container_of(priv, struct ieee80211_vif, drv_priv);
+
+               __mt76_sta_remove(&dev->mt76, vif, sta);
+               memset(msta, 0, sizeof(*msta));
+       }
+
+       dev->vif_mask = 0;
+       dev->beacon_mask = 0;
+}
+
 static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 {
        u32 mask = dev->mt76.mmio.irqmask;
+       bool restart = dev->mt76.mcu_ops->mcu_restart;
        int i;
 
        ieee80211_stop_queues(dev->mt76.hw);
@@ -434,6 +492,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
 
        mutex_lock(&dev->mt76.mutex);
 
+       if (restart)
+               mt76x02_reset_state(dev);
+
        if (dev->beacon_mask)
                mt76_clear(dev, MT_BEACON_TIME_CFG,
                           MT_BEACON_TIME_CFG_BEACON_TX |
@@ -452,20 +513,21 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
        /* let fw reset DMA */
        mt76_set(dev, 0x734, 0x3);
 
+       if (restart)
+               dev->mt76.mcu_ops->mcu_restart(&dev->mt76);
+
        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_tx); i++)
                mt76_queue_tx_cleanup(dev, i, true);
 
        for (i = 0; i < ARRAY_SIZE(dev->mt76.q_rx); i++)
                mt76_queue_rx_reset(dev, i);
 
-       mt76_wr(dev, MT_MAC_SYS_CTRL,
-               MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX);
-       mt76_set(dev, MT_WPDMA_GLO_CFG,
-                MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
+       mt76x02_mac_start(dev);
+
        if (dev->ed_monitor)
                mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
 
-       if (dev->beacon_mask)
+       if (dev->beacon_mask && !restart)
                mt76_set(dev, MT_BEACON_TIME_CFG,
                         MT_BEACON_TIME_CFG_BEACON_TX |
                         MT_BEACON_TIME_CFG_TBTT_EN);
@@ -486,9 +548,13 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
                napi_schedule(&dev->mt76.napi[i]);
        }
 
-       ieee80211_wake_queues(dev->mt76.hw);
-
-       mt76_txq_schedule_all(&dev->mt76);
+       if (restart) {
+               mt76x02_mcu_function_select(dev, Q_SELECT, 1);
+               ieee80211_restart_hw(dev->mt76.hw);
+       } else {
+               ieee80211_wake_queues(dev->mt76.hw);
+               mt76_txq_schedule_all(&dev->mt76);
+       }
 }
 
 static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
index a020c757ba5c6c59fba0463d01f5774a916e4339..a54b63a96eaefa24268f28573f1123c30ee6b9d5 100644 (file)
@@ -194,6 +194,8 @@ bool mt76x02_phy_adjust_vga_gain(struct mt76x02_dev *dev)
                ret = true;
        }
 
+       dev->cal.agc_lowest_gain = dev->cal.agc_gain_adjust >= limit;
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(mt76x02_phy_adjust_vga_gain);
index 43f07461c8d39b6045388c8bbe59b1d2b0fcd6e8..6fb52b596d421753ff24f247225136b30c15150b 100644 (file)
@@ -85,8 +85,9 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data,
 
        mt76x02_insert_hdr_pad(skb);
 
-       txwi = skb_push(skb, sizeof(struct mt76x02_txwi));
+       txwi = (struct mt76x02_txwi *)(skb->data - sizeof(struct mt76x02_txwi));
        mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len);
+       skb_push(skb, sizeof(struct mt76x02_txwi));
 
        pid = mt76_tx_status_skb_add(mdev, wcid, skb);
        txwi->pktid = pid;
index a48c261b0c634bca601f8d9fcd496b99a5bc2fa7..cd072ac614f76847b86618a4f40c87af171f5147 100644 (file)
@@ -237,6 +237,8 @@ int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
        struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
        int idx = 0;
 
+       memset(msta, 0, sizeof(*msta));
+
        idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid));
        if (idx < 0)
                return -ENOSPC;
@@ -274,6 +276,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
        struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
        struct mt76_txq *mtxq;
 
+       memset(mvif, 0, sizeof(*mvif));
+
        mvif->idx = idx;
        mvif->group_wcid.idx = MT_VIF_WCID(idx);
        mvif->group_wcid.hw_key_idx = -1;
@@ -289,6 +293,12 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        struct mt76x02_dev *dev = hw->priv;
        unsigned int idx = 0;
 
+       /* Allow to change address in HW if we create first interface. */
+       if (!dev->vif_mask &&
+           (((vif->addr[0] ^ dev->mt76.macaddr[0]) & ~GENMASK(4, 1)) ||
+            memcmp(vif->addr + 1, dev->mt76.macaddr + 1, ETH_ALEN - 1)))
+               mt76x02_mac_setaddr(dev, vif->addr);
+
        if (vif->addr[0] & BIT(1))
                idx = 1 + (((dev->mt76.macaddr[0] ^ vif->addr[0]) >> 2) & 7);
 
@@ -311,10 +321,6 @@ mt76x02_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
        if (dev->vif_mask & BIT(idx))
                return -EBUSY;
 
-       /* Allow to change address in HW if we create first interface. */
-       if (!dev->vif_mask && !ether_addr_equal(dev->mt76.macaddr, vif->addr))
-                mt76x02_mac_setaddr(dev, vif->addr);
-
        dev->vif_mask |= BIT(idx);
 
        mt76x02_vif_init(dev, vif, idx);
index f8534362e2c8cdf7edf9b03d5e2118d885d5ca6f..a30ef2c5a9db0433cbb696f6ab6f3e8b1811cb5d 100644 (file)
@@ -106,7 +106,7 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev)
                { MT_TX_SW_CFG1,                0x00010000 },
                { MT_TX_SW_CFG2,                0x00000000 },
                { MT_TXOP_CTRL_CFG,             0x0400583f },
-               { MT_TX_RTS_CFG,                0x00100020 },
+               { MT_TX_RTS_CFG,                0x00ffff20 },
                { MT_TX_TIMEOUT_CFG,            0x000a2290 },
                { MT_TX_RETRY_CFG,              0x47f01f0f },
                { MT_EXP_ACK_TIME,              0x002c00dc },
index 6c619f1c65c9cbfbbdf9241dc340c448826ab228..d7abe3d73badbbce7100781ece4b5c91407bc0dd 100644 (file)
@@ -71,6 +71,7 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level,
 
 void mt76x2_cleanup(struct mt76x02_dev *dev);
 
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard);
 void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable);
 void mt76x2_init_txpower(struct mt76x02_dev *dev,
                         struct ieee80211_supported_band *sband);
index 984d9c4c2e1a8ac9bfb435ef8de97898cb720b4d..d3927a13e92e91068344e431176a3b42ef6ff444 100644 (file)
@@ -77,7 +77,7 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev)
        }
 }
 
-static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
+int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard)
 {
        const u8 *macaddr = dev->mt76.macaddr;
        u32 val;
index 03e24ae7f66c7c8953e7308a1bac304ff294fb1b..605dc66ae83be45d956c0483e9688a8482f442c8 100644 (file)
@@ -165,9 +165,30 @@ error:
        return -ENOENT;
 }
 
+static int
+mt76pci_mcu_restart(struct mt76_dev *mdev)
+{
+       struct mt76x02_dev *dev;
+       int ret;
+
+       dev = container_of(mdev, struct mt76x02_dev, mt76);
+
+       mt76x02_mcu_cleanup(dev);
+       mt76x2_mac_reset(dev, true);
+
+       ret = mt76pci_load_firmware(dev);
+       if (ret)
+               return ret;
+
+       mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
+
+       return 0;
+}
+
 int mt76x2_mcu_init(struct mt76x02_dev *dev)
 {
        static const struct mt76_mcu_ops mt76x2_mcu_ops = {
+               .mcu_restart = mt76pci_mcu_restart,
                .mcu_send_msg = mt76x02_mcu_msg_send,
        };
        int ret;
index 1848e8ab2e21cfb6332fd17259986d6b932dbff6..769a9b9720442c5d303e9a65519a8fe1de166089 100644 (file)
@@ -260,10 +260,15 @@ mt76x2_phy_set_gain_val(struct mt76x02_dev *dev)
        gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust;
        gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust;
 
-       if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
+       val = 0x1836 << 16;
+       if (!mt76x2_has_ext_lna(dev) &&
+           dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40)
                val = 0x1e42 << 16;
-       else
-               val = 0x1836 << 16;
+
+       if (mt76x2_has_ext_lna(dev) &&
+           dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ &&
+           dev->mt76.chandef.width < NL80211_CHAN_WIDTH_40)
+               val = 0x0f36 << 16;
 
        val |= 0xf8;
 
@@ -280,6 +285,7 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
 {
        u8 *gain = dev->cal.agc_gain_init;
        u8 low_gain_delta, gain_delta;
+       u32 agc_35, agc_37;
        bool gain_change;
        int low_gain;
        u32 val;
@@ -318,6 +324,16 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
        else
                low_gain_delta = 14;
 
+       agc_37 = 0x2121262c;
+       if (dev->mt76.chandef.chan->band == NL80211_BAND_2GHZ)
+               agc_35 = 0x11111516;
+       else if (low_gain == 2)
+               agc_35 = agc_37 = 0x08080808;
+       else if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
+               agc_35 = 0x10101014;
+       else
+               agc_35 = 0x11111116;
+
        if (low_gain == 2) {
                mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990);
                mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808);
@@ -326,15 +342,13 @@ void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev)
                dev->cal.agc_gain_adjust = 0;
        } else {
                mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991);
-               if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80)
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014);
-               else
-                       mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116);
-               mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C);
                gain_delta = 0;
                dev->cal.agc_gain_adjust = low_gain_delta;
        }
 
+       mt76_wr(dev, MT_BBP(AGC, 35), agc_35);
+       mt76_wr(dev, MT_BBP(AGC, 37), agc_37);
+
        dev->cal.agc_gain_cur[0] = gain[0] - gain_delta;
        dev->cal.agc_gain_cur[1] = gain[1] - gain_delta;
        mt76x2_phy_set_gain_val(dev);
index ddb6b2c48e01283a8041620fdbc46542de8d04c7..ac0f13d4629963cea77e5ab113caf3e11a33275a 100644 (file)
 #include "mt76x2u.h"
 
 static const struct usb_device_id mt76x2u_device_table[] = {
-       { USB_DEVICE(0x0e8d, 0x7612) }, /* Alfa AWUS036ACM */
        { USB_DEVICE(0x0b05, 0x1833) }, /* Asus USB-AC54 */
        { USB_DEVICE(0x0b05, 0x17eb) }, /* Asus USB-AC55 */
        { USB_DEVICE(0x0b05, 0x180b) }, /* Asus USB-N53 B1 */
-       { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USB-AC1200 */
+       { USB_DEVICE(0x0e8d, 0x7612) }, /* Aukey USBAC1200 - Alfa AWUS036ACM */
        { USB_DEVICE(0x057c, 0x8503) }, /* Avm FRITZ!WLAN AC860 */
        { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
        { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
@@ -66,6 +65,10 @@ static int mt76x2u_probe(struct usb_interface *intf,
 
        mdev->rev = mt76_rr(dev, MT_ASIC_VERSION);
        dev_info(mdev->dev, "ASIC revision: %08x\n", mdev->rev);
+       if (!is_mt76x2(dev)) {
+               err = -ENODEV;
+               goto err;
+       }
 
        err = mt76x2u_register_device(dev);
        if (err < 0)
index 5e84b4535cb1456c22bbf883d79c9d44c62da438..3b82345756ea90d3cc93f71946c2ec6cb7e81110 100644 (file)
@@ -93,7 +93,6 @@ int mt76x2u_mac_reset(struct mt76x02_dev *dev)
        mt76_wr(dev, MT_TX_LINK_CFG, 0x1020);
        mt76_wr(dev, MT_AUTO_RSP_CFG, 0x13);
        mt76_wr(dev, MT_MAX_LEN_CFG, 0x2f00);
-       mt76_wr(dev, MT_TX_RTS_CFG, 0x92b20);
 
        mt76_wr(dev, MT_WMM_AIFSN, 0x2273);
        mt76_wr(dev, MT_WMM_CWMIN, 0x2344);
index 5a349fe3e576f606ec2fb1997ed8a394d54d7b3f..2585df5123350ba8adf52cf14ef6133de7e03f77 100644 (file)
@@ -289,8 +289,11 @@ mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);
 
-       if (q->queued > q->ndesc - 8)
+       if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
+               q->stopped = true;
+       }
+
        spin_unlock_bh(&q->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_tx);
@@ -374,7 +377,10 @@ mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
+       } else {
+               ieee80211_sta_eosp(sta);
        }
+
        spin_unlock_bh(&hwq->lock);
 }
 EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
@@ -577,6 +583,9 @@ void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
        struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
        struct mt76_queue *hwq = mtxq->hwq;
 
+       if (!test_bit(MT76_STATE_RUNNING, &dev->state))
+               return;
+
        spin_lock_bh(&hwq->lock);
        if (list_empty(&mtxq->list))
                list_add_tail(&mtxq->list, &hwq->swq);
index ae6ada370597a6f0ebfd3b7d69bb583ecf8759ef..4c1abd4924054c6f377ca062ad98ca5bb8446dc7 100644 (file)
@@ -655,7 +655,11 @@ static void mt76u_tx_tasklet(unsigned long data)
                        spin_lock_bh(&q->lock);
                }
                mt76_txq_schedule(dev, q);
-               wake = i < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
+
+               wake = q->stopped && q->queued < q->ndesc - 8;
+               if (wake)
+                       q->stopped = false;
+
                if (!q->queued)
                        wake_up(&dev->tx_wait);
 
index d8b7863f79261a3275b6641ffdf7607e23fdd206..6ae7f14dc9bf936ec3ae34f9f4d4f1ee67de6646 100644 (file)
@@ -303,6 +303,10 @@ static int mt7601u_probe(struct usb_interface *usb_intf,
        mac_rev = mt7601u_rr(dev, MT_MAC_CSR0);
        dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n",
                 asic_rev, mac_rev);
+       if ((asic_rev >> 16) != 0x7601) {
+               ret = -ENODEV;
+               goto err;
+       }
 
        /* Note: vendor driver skips this check for MT7601U */
        if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL))
index 2839bb70badfbcb8284bc5bbbc1f457bd3b58c63..f0716f6ce41fa2a1ad993e45adba9148d7f0c120 100644 (file)
@@ -404,15 +404,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
 static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
                struct nvme_ns *ns)
 {
-       enum nvme_ana_state old;
-
        mutex_lock(&ns->head->lock);
-       old = ns->ana_state;
        ns->ana_grpid = le32_to_cpu(desc->grpid);
        ns->ana_state = desc->state;
        clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
 
-       if (nvme_state_is_live(ns->ana_state) && !nvme_state_is_live(old))
+       if (nvme_state_is_live(ns->ana_state))
                nvme_mpath_set_live(ns);
        mutex_unlock(&ns->head->lock);
 }
index e7e08889865e732d503a6ac2af5d38cac4dd9672..68c49dd672104d82ea768a6e9bf4354df731422b 100644 (file)
@@ -627,7 +627,7 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
        return ret;
 }
 
-static inline void nvme_tcp_end_request(struct request *rq, __le16 status)
+static inline void nvme_tcp_end_request(struct request *rq, u16 status)
 {
        union nvme_result res = {};
 
index 2d73b66e368627cdee268a74d30fb3c5d6a34235..b3e765a95af8ee7447c536ff48095504c8100d67 100644 (file)
@@ -509,7 +509,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 
        ret = nvmet_p2pmem_ns_enable(ns);
        if (ret)
-               goto out_unlock;
+               goto out_dev_disable;
 
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                nvmet_p2pmem_ns_add_p2p(ctrl, ns);
@@ -550,7 +550,7 @@ out_unlock:
 out_dev_put:
        list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
                pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
-
+out_dev_disable:
        nvmet_ns_dev_disable(ns);
        goto out_unlock;
 }
index 3e43212d3c1c6bba5a6d553dc2a965188c5ccbf5..bc6ebb51b0bf7c5310940fca19450fd115ea7788 100644 (file)
@@ -75,11 +75,11 @@ err:
        return ret;
 }
 
-static void nvmet_file_init_bvec(struct bio_vec *bv, struct sg_page_iter *iter)
+static void nvmet_file_init_bvec(struct bio_vec *bv, struct scatterlist *sg)
 {
-       bv->bv_page = sg_page_iter_page(iter);
-       bv->bv_offset = iter->sg->offset;
-       bv->bv_len = PAGE_SIZE - iter->sg->offset;
+       bv->bv_page = sg_page(sg);
+       bv->bv_offset = sg->offset;
+       bv->bv_len = sg->length;
 }
 
 static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
@@ -128,14 +128,14 @@ static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
 
 static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
-       struct sg_page_iter sg_pg_iter;
+       ssize_t nr_bvec = req->sg_cnt;
        unsigned long bv_cnt = 0;
        bool is_sync = false;
        size_t len = 0, total_len = 0;
        ssize_t ret = 0;
        loff_t pos;
-
+       int i;
+       struct scatterlist *sg;
 
        if (req->f.mpool_alloc && nr_bvec > NVMET_MAX_MPOOL_BVEC)
                is_sync = true;
@@ -147,8 +147,8 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
        }
 
        memset(&req->f.iocb, 0, sizeof(struct kiocb));
-       for_each_sg_page(req->sg, &sg_pg_iter, req->sg_cnt, 0) {
-               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], &sg_pg_iter);
+       for_each_sg(req->sg, sg, req->sg_cnt, i) {
+               nvmet_file_init_bvec(&req->f.bvec[bv_cnt], sg);
                len += req->f.bvec[bv_cnt].bv_len;
                total_len += req->f.bvec[bv_cnt].bv_len;
                bv_cnt++;
@@ -225,7 +225,7 @@ static void nvmet_file_submit_buffered_io(struct nvmet_req *req)
 
 static void nvmet_file_execute_rw(struct nvmet_req *req)
 {
-       ssize_t nr_bvec = DIV_ROUND_UP(req->data_len, PAGE_SIZE);
+       ssize_t nr_bvec = req->sg_cnt;
 
        if (!req->sg_cnt || !nr_bvec) {
                nvmet_req_complete(req, 0);
index 56dd83a45e55dc21360f729c488a213a2a258241..5484a46dafda857a7e64207ccac4a8249cf1512c 100644 (file)
@@ -213,12 +213,10 @@ void parport_daisy_fini(struct parport *port)
 struct pardevice *parport_open(int devnum, const char *name)
 {
        struct daisydev *p = topology;
-       struct pardev_cb par_cb;
        struct parport *port;
        struct pardevice *dev;
        int daisy;
 
-       memset(&par_cb, 0, sizeof(par_cb));
        spin_lock(&topology_lock);
        while (p && p->devnum != devnum)
                p = p->next;
@@ -232,7 +230,7 @@ struct pardevice *parport_open(int devnum, const char *name)
        port = parport_get_port(p->port);
        spin_unlock(&topology_lock);
 
-       dev = parport_register_dev_model(port, name, &par_cb, devnum);
+       dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL);
        parport_put_port(port);
        if (!dev)
                return NULL;
@@ -482,31 +480,3 @@ static int assign_addrs(struct parport *port)
        kfree(deviceid);
        return detected;
 }
-
-static int daisy_drv_probe(struct pardevice *par_dev)
-{
-       struct device_driver *drv = par_dev->dev.driver;
-
-       if (strcmp(drv->name, "daisy_drv"))
-               return -ENODEV;
-       if (strcmp(par_dev->name, daisy_dev_name))
-               return -ENODEV;
-
-       return 0;
-}
-
-static struct parport_driver daisy_driver = {
-       .name = "daisy_drv",
-       .probe = daisy_drv_probe,
-       .devmodel = true,
-};
-
-int daisy_drv_init(void)
-{
-       return parport_register_driver(&daisy_driver);
-}
-
-void daisy_drv_exit(void)
-{
-       parport_unregister_driver(&daisy_driver);
-}
index e5e6a463a9412e167a9e2b2c34f4a6cfb3a1cb2a..e035174ba205d12dbc6e529c6ec85c8bda9e5d21 100644 (file)
@@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer,
 ssize_t parport_device_id (int devnum, char *buffer, size_t count)
 {
        ssize_t retval = -ENXIO;
-       struct pardevice *dev = parport_open(devnum, daisy_dev_name);
+       struct pardevice *dev = parport_open (devnum, "Device ID probe");
        if (!dev)
                return -ENXIO;
 
index 0171b8dbcdcd5f57c54eeee19eed23d65dbd8897..5dc53d420ca8ca805c0c036c23e3c1a3fc42ac00 100644 (file)
@@ -137,19 +137,11 @@ static struct bus_type parport_bus_type = {
 
 int parport_bus_init(void)
 {
-       int retval;
-
-       retval = bus_register(&parport_bus_type);
-       if (retval)
-               return retval;
-       daisy_drv_init();
-
-       return 0;
+       return bus_register(&parport_bus_type);
 }
 
 void parport_bus_exit(void)
 {
-       daisy_drv_exit();
        bus_unregister(&parport_bus_type);
 }
 
index 224d886341158ba55494da1c766a933b0cfaeefc..d994839a3e24b5ec8c1452f3489c47d8fa7aba20 100644 (file)
@@ -273,6 +273,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
 u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
                           enum pcie_link_width *width);
 void __pcie_print_link_status(struct pci_dev *dev, bool verbose);
+void pcie_report_downtraining(struct pci_dev *dev);
 
 /* Single Root I/O Virtualization */
 struct pci_sriov {
index d2eae3b7cc0f74d5c8fdec80fa6ffffd68dd8501..4fa9e3523ee1a22bc763aa5ea0f162dc00ab09dd 100644 (file)
@@ -30,6 +30,8 @@ static void pcie_enable_link_bandwidth_notification(struct pci_dev *dev)
 {
        u16 lnk_ctl;
 
+       pcie_capability_write_word(dev, PCI_EXP_LNKSTA, PCI_EXP_LNKSTA_LBMS);
+
        pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &lnk_ctl);
        lnk_ctl |= PCI_EXP_LNKCTL_LBMIE;
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
@@ -44,11 +46,10 @@ static void pcie_disable_link_bandwidth_notification(struct pci_dev *dev)
        pcie_capability_write_word(dev, PCI_EXP_LNKCTL, lnk_ctl);
 }
 
-static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+static irqreturn_t pcie_bw_notification_irq(int irq, void *context)
 {
        struct pcie_device *srv = context;
        struct pci_dev *port = srv->port;
-       struct pci_dev *dev;
        u16 link_status, events;
        int ret;
 
@@ -58,17 +59,26 @@ static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
        if (ret != PCIBIOS_SUCCESSFUL || !events)
                return IRQ_NONE;
 
+       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
+       pcie_update_link_speed(port->subordinate, link_status);
+       return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t pcie_bw_notification_handler(int irq, void *context)
+{
+       struct pcie_device *srv = context;
+       struct pci_dev *port = srv->port;
+       struct pci_dev *dev;
+
        /*
         * Print status from downstream devices, not this root port or
         * downstream switch port.
         */
        down_read(&pci_bus_sem);
        list_for_each_entry(dev, &port->subordinate->devices, bus_list)
-               __pcie_print_link_status(dev, false);
+               pcie_report_downtraining(dev);
        up_read(&pci_bus_sem);
 
-       pcie_update_link_speed(port->subordinate, link_status);
-       pcie_capability_write_word(port, PCI_EXP_LNKSTA, events);
        return IRQ_HANDLED;
 }
 
@@ -80,7 +90,8 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
        if (!pcie_link_bandwidth_notification_supported(srv->port))
                return -ENODEV;
 
-       ret = request_threaded_irq(srv->irq, NULL, pcie_bw_notification_handler,
+       ret = request_threaded_irq(srv->irq, pcie_bw_notification_irq,
+                                  pcie_bw_notification_handler,
                                   IRQF_SHARED, "PCIe BW notif", srv);
        if (ret)
                return ret;
index 2ec0df04e0dca15ce1f56b3f9049280f199e0928..7e12d016386394ab9b401f3e5dcb8da8b917484c 100644 (file)
@@ -2388,7 +2388,7 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
        return dev;
 }
 
-static void pcie_report_downtraining(struct pci_dev *dev)
+void pcie_report_downtraining(struct pci_dev *dev)
 {
        if (!pci_is_pcie(dev))
                return;
index 5163097b43dff1472af1b905936588750d45b9a8..4bbd9ede38c8355a9bf226e80eaabc19bc9eda6e 100644 (file)
@@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
        struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
        int new_mode;
 
-       if (phy->index != 0)
+       if (phy->index != 0) {
+               if (mode == PHY_MODE_USB_HOST)
+                       return 0;
                return -EINVAL;
+       }
 
        switch (mode) {
        case PHY_MODE_USB_HOST:
index 4159c63a5fd2bbba9b9c2949fde8c56ba9030a89..a835b31aad999dcbc90847455b0c75f612aba563 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/crw.h>
 #include <asm/isc.h>
 #include <asm/ebcdic.h>
+#include <asm/ap.h>
 
 #include "css.h"
 #include "cio.h"
@@ -586,6 +587,15 @@ static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area)
                              " failed (rc=%d).\n", ret);
 }
 
+static void chsc_process_sei_ap_cfg_chg(struct chsc_sei_nt0_area *sei_area)
+{
+       CIO_CRW_EVENT(3, "chsc: ap config changed\n");
+       if (sei_area->rs != 5)
+               return;
+
+       ap_bus_cfg_chg();
+}
+
 static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area)
 {
        switch (sei_area->cc) {
@@ -612,6 +622,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
        case 2: /* i/o resource accessibility */
                chsc_process_sei_res_acc(sei_area);
                break;
+       case 3: /* ap config changed */
+               chsc_process_sei_ap_cfg_chg(sei_area);
+               break;
        case 7: /* channel-path-availability information */
                chsc_process_sei_chp_avail(sei_area);
                break;
index a10cec0e86eb495ffd45f3854a09e1a76bf3e598..0b3b9de45c602042384751921379b0d903e5be79 100644 (file)
@@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
 {
        struct vfio_ccw_private *private;
        struct irb *irb;
+       bool is_final;
 
        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;
 
+       is_final = !(scsw_actl(&irb->scsw) &
+                    (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
-               cp_free(&private->cp);
+               if (is_final)
+                       cp_free(&private->cp);
        }
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
 
        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
 
-       if (private->mdev)
+       if (private->mdev && is_final)
                private->state = VFIO_CCW_STATE_IDLE;
 }
 
index e15816ff126582f933c66add86bb45e7b0606e0f..1546389d71dbca7ebc1f2f103780182742226376 100644 (file)
@@ -810,11 +810,18 @@ static int ap_device_remove(struct device *dev)
        struct ap_device *ap_dev = to_ap_dev(dev);
        struct ap_driver *ap_drv = ap_dev->drv;
 
+       /* prepare ap queue device removal */
        if (is_queue_dev(dev))
-               ap_queue_remove(to_ap_queue(dev));
+               ap_queue_prepare_remove(to_ap_queue(dev));
+
+       /* driver's chance to clean up gracefully */
        if (ap_drv->remove)
                ap_drv->remove(ap_dev);
 
+       /* now do the ap queue device remove */
+       if (is_queue_dev(dev))
+               ap_queue_remove(to_ap_queue(dev));
+
        /* Remove queue/card from list of active queues/cards */
        spin_lock_bh(&ap_list_lock);
        if (is_card_dev(dev))
@@ -860,6 +867,16 @@ void ap_bus_force_rescan(void)
 }
 EXPORT_SYMBOL(ap_bus_force_rescan);
 
+/*
+ * A config change has happened; force an AP bus rescan.
+ */
+void ap_bus_cfg_chg(void)
+{
+       AP_DBF(DBF_INFO, "%s config change, forcing bus rescan\n", __func__);
+
+       ap_bus_force_rescan();
+}
+
 /*
  * hex2bitmap() - parse hex mask string and set bitmap.
  * Valid strings are "0x012345678" with at least one valid hex number.
index d0059eae5d94bd51a5c677c28162ed63c9f0d437..15a98a673c5cc3323980f15e95d3418b1c65e028 100644 (file)
@@ -91,6 +91,7 @@ enum ap_state {
        AP_STATE_WORKING,
        AP_STATE_QUEUE_FULL,
        AP_STATE_SUSPEND_WAIT,
+       AP_STATE_REMOVE,        /* about to be removed from driver */
        AP_STATE_UNBOUND,       /* momentary not bound to a driver */
        AP_STATE_BORKED,        /* broken */
        NR_AP_STATES
@@ -252,6 +253,7 @@ void ap_bus_force_rescan(void);
 
 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *ap_msg);
 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type);
+void ap_queue_prepare_remove(struct ap_queue *aq);
 void ap_queue_remove(struct ap_queue *aq);
 void ap_queue_suspend(struct ap_device *ap_dev);
 void ap_queue_resume(struct ap_device *ap_dev);
index ba261210c6da0518fe7f8f4cb8f702b0503464b9..6a340f2c355693170776992c6a1d018e78d6ee96 100644 (file)
@@ -420,6 +420,10 @@ static ap_func_t *ap_jumptable[NR_AP_STATES][NR_AP_EVENTS] = {
                [AP_EVENT_POLL] = ap_sm_suspend_read,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
        },
+       [AP_STATE_REMOVE] = {
+               [AP_EVENT_POLL] = ap_sm_nop,
+               [AP_EVENT_TIMEOUT] = ap_sm_nop,
+       },
        [AP_STATE_UNBOUND] = {
                [AP_EVENT_POLL] = ap_sm_nop,
                [AP_EVENT_TIMEOUT] = ap_sm_nop,
@@ -740,18 +744,31 @@ void ap_flush_queue(struct ap_queue *aq)
 }
 EXPORT_SYMBOL(ap_flush_queue);
 
-void ap_queue_remove(struct ap_queue *aq)
+void ap_queue_prepare_remove(struct ap_queue *aq)
 {
-       ap_flush_queue(aq);
+       spin_lock_bh(&aq->lock);
+       /* flush queue */
+       __ap_flush_queue(aq);
+       /* set REMOVE state to prevent new messages from being queued */
+       aq->state = AP_STATE_REMOVE;
        del_timer_sync(&aq->timeout);
+       spin_unlock_bh(&aq->lock);
+}
 
-       /* reset with zero, also clears irq registration */
+void ap_queue_remove(struct ap_queue *aq)
+{
+       /*
+        * All messages have been flushed and the state is
+        * AP_STATE_REMOVE. Now reset with zero, which also
+        * clears the irq registration, and move the state
+        * to AP_STATE_UNBOUND to signal that this queue is
+        * currently not used by any driver.
+        */
        spin_lock_bh(&aq->lock);
        ap_zapq(aq->qid);
        aq->state = AP_STATE_UNBOUND;
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_remove);
 
 void ap_queue_reinit_state(struct ap_queue *aq)
 {
@@ -760,4 +777,3 @@ void ap_queue_reinit_state(struct ap_queue *aq)
        ap_wait(ap_sm_event(aq, AP_EVENT_POLL));
        spin_unlock_bh(&aq->lock);
 }
-EXPORT_SYMBOL(ap_queue_reinit_state);
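
The removal path above is split into ap_queue_prepare_remove(), which flushes the queue and blocks new work under the lock, and ap_queue_remove(), which later resets the queue once the driver has cleaned up. A rough userspace analogue of that two-phase teardown, with invented names and a pthread mutex standing in for the queue lock:

#include <pthread.h>
#include <stdio.h>

enum q_state { Q_WORKING, Q_REMOVE, Q_UNBOUND };

struct queue {
        pthread_mutex_t lock;
        enum q_state state;
        int pending;
};

static void queue_prepare_remove(struct queue *q)
{
        pthread_mutex_lock(&q->lock);
        q->pending = 0;                 /* flush queued messages */
        q->state = Q_REMOVE;            /* reject new submissions from now on */
        pthread_mutex_unlock(&q->lock);
}

static void queue_remove(struct queue *q)
{
        pthread_mutex_lock(&q->lock);
        /* a hardware reset would go here */
        q->state = Q_UNBOUND;           /* no driver owns the queue anymore */
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct queue q = { PTHREAD_MUTEX_INITIALIZER, Q_WORKING, 3 };

        queue_prepare_remove(&q);
        /* ... driver-specific cleanup runs in between ... */
        queue_remove(&q);
        printf("state=%d pending=%d\n", q.state, q.pending);
        return 0;
}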
index eb93c2d27d0ad142c4d977d74df3e415468336af..689c2af7026a3adcf08e2e6eb019d9352e6de9d4 100644 (file)
@@ -586,6 +586,7 @@ static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
 
 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
                                                     struct zcrypt_queue *zq,
+                                                    struct module **pmod,
                                                     unsigned int weight)
 {
        if (!zq || !try_module_get(zq->queue->ap_dev.drv->driver.owner))
@@ -595,15 +596,15 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
        atomic_add(weight, &zc->load);
        atomic_add(weight, &zq->load);
        zq->request_count++;
+       *pmod = zq->queue->ap_dev.drv->driver.owner;
        return zq;
 }
 
 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
                                     struct zcrypt_queue *zq,
+                                    struct module *mod,
                                     unsigned int weight)
 {
-       struct module *mod = zq->queue->ap_dev.drv->driver.owner;
-
        zq->request_count--;
        atomic_sub(weight, &zc->load);
        atomic_sub(weight, &zq->load);
@@ -653,6 +654,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
 
@@ -706,7 +708,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -718,7 +720,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo(pref_zq, mex);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -735,6 +737,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        unsigned int weight, pref_weight;
        unsigned int func_code;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(crt, TP_ICARSACRT);
 
@@ -788,7 +791,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -800,7 +803,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
        rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -819,6 +822,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        unsigned int func_code;
        unsigned short *domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcRB, TB_ZSECSENDCPRB);
 
@@ -865,7 +869,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -881,7 +885,7 @@ static long _zcrypt_send_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_cprb(pref_zq, xcRB, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
@@ -932,6 +936,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        unsigned int func_code;
        struct ap_message ap_msg;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
 
@@ -1000,7 +1005,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1012,7 +1017,7 @@ static long zcrypt_send_ep11_cprb(struct ap_perms *perms,
        rc = pref_zq->ops->send_ep11_cprb(pref_zq, xcrb, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out_free:
@@ -1033,6 +1038,7 @@ static long zcrypt_rng(char *buffer)
        struct ap_message ap_msg;
        unsigned int domain;
        int qid = 0, rc = -ENODEV;
+       struct module *mod;
 
        trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
 
@@ -1064,7 +1070,7 @@ static long zcrypt_rng(char *buffer)
                        pref_weight = weight;
                }
        }
-       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, weight);
+       pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
        if (!pref_zq) {
@@ -1076,7 +1082,7 @@ static long zcrypt_rng(char *buffer)
        rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
 
        spin_lock(&zcrypt_list_lock);
-       zcrypt_drop_queue(pref_zc, pref_zq, weight);
+       zcrypt_drop_queue(pref_zc, pref_zq, mod, weight);
        spin_unlock(&zcrypt_list_lock);
 
 out:
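The zcrypt_api.c hunks make zcrypt_pick_queue() hand back the driver module owner through the new pmod argument while the module reference is still held, and zcrypt_drop_queue() then releases exactly that pointer instead of re-reading zq->queue->ap_dev.drv, which can already be unbound by the time the request finishes. A hedged sketch of the capture-then-put pattern; my_dev, my_pick and my_drop are invented names, not the zcrypt API:

#include <linux/device.h>
#include <linux/module.h>

struct my_dev {
        struct device_driver *drv;      /* may go away on unbind */
};

static bool my_pick(struct my_dev *d, struct module **pmod)
{
        if (!d->drv || !try_module_get(d->drv->owner))
                return false;
        *pmod = d->drv->owner;          /* remember what we pinned */
        return true;
}

static void my_drop(struct module *mod)
{
        /* the driver pointer may be gone by now, only use the saved module */
        module_put(mod);
}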
index 197b0f5b63e7183473b91a0d8d8f728bb3e0c16e..44bd6f04c145da55b1aef66ad147983d74cedf9a 100644 (file)
@@ -1150,13 +1150,16 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
 
 static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
 {
+       struct sk_buff *skb;
+
        /* release may never happen from within CQ tasklet scope */
        WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
 
        if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
                qeth_notify_skbs(buf->q, buf, TX_NOTIFY_GENERALERROR);
 
-       __skb_queue_purge(&buf->skb_list);
+       while ((skb = __skb_dequeue(&buf->skb_list)) != NULL)
+               consume_skb(skb);
 }
 
 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
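qeth_release_skbs() previously purged buf->skb_list with __skb_queue_purge(), which frees each skb through kfree_skb() and is therefore accounted as a packet drop; the replacement loop dequeues and calls consume_skb() to signal normal completion. A small hedged sketch of the distinction, using a hypothetical done_q:

#include <linux/skbuff.h>

static void free_completed(struct sk_buff_head *done_q)
{
        struct sk_buff *skb;

        /* consume_skb(): normal completion, not accounted as a drop */
        while ((skb = __skb_dequeue(done_q)) != NULL)
                consume_skb(skb);
}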
index 8efb2e8ff8f460adacd53376d5cddd3fc6953b03..c3067fd3bd9ee47ad79d106cd3b17067ea91fbf3 100644 (file)
@@ -629,8 +629,7 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb,
        } /* else fall through */
 
        QETH_TXQ_STAT_INC(queue, tx_dropped);
-       QETH_TXQ_STAT_INC(queue, tx_errors);
-       dev_kfree_skb_any(skb);
+       kfree_skb(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
@@ -645,6 +644,8 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc;
 
+       qeth_l2_vnicc_set_defaults(card);
+
        if (gdev->dev.type == &qeth_generic_devtype) {
                rc = qeth_l2_create_device_attributes(&gdev->dev);
                if (rc)
@@ -652,8 +653,6 @@ static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
        }
 
        hash_init(card->mac_htable);
-       card->info.hwtrap = 0;
-       qeth_l2_vnicc_set_defaults(card);
        return 0;
 }
 
index 7e68d9d16859d24eaae38d3f0a8079f40cbabdf2..53712cf2640659cb0da642ba1dc05bb03e6567ec 100644 (file)
@@ -2096,8 +2096,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb,
 
 tx_drop:
        QETH_TXQ_STAT_INC(queue, tx_dropped);
-       QETH_TXQ_STAT_INC(queue, tx_errors);
-       dev_kfree_skb_any(skb);
+       kfree_skb(skb);
        netif_wake_queue(dev);
        return NETDEV_TX_OK;
 }
@@ -2253,14 +2252,15 @@ static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
        struct qeth_card *card = dev_get_drvdata(&gdev->dev);
        int rc;
 
+       hash_init(card->ip_htable);
+
        if (gdev->dev.type == &qeth_generic_devtype) {
                rc = qeth_l3_create_device_attributes(&gdev->dev);
                if (rc)
                        return rc;
        }
-       hash_init(card->ip_htable);
+
        hash_init(card->ip_mc_htable);
-       card->info.hwtrap = 0;
        return 0;
 }
 
index 744a64680d5b0d16c982012bfe2b351becd54a9a..e8fc28dba8dfc3521532c3c87d26d199b8ed9b6c 100644 (file)
@@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
        add_timer(&erp_action->timer);
 }
 
+void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                    int clear, char *dbftag)
+{
+       unsigned long flags;
+       struct zfcp_port *port;
+
+       write_lock_irqsave(&adapter->erp_lock, flags);
+       read_lock(&adapter->port_list_lock);
+       list_for_each_entry(port, &adapter->port_list, list)
+               _zfcp_erp_port_forced_reopen(port, clear, dbftag);
+       read_unlock(&adapter->port_list_lock);
+       write_unlock_irqrestore(&adapter->erp_lock, flags);
+}
+
 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
                                      int clear, char *dbftag)
 {
@@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
                struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
                int lun_status;
 
+               if (sdev->sdev_state == SDEV_DEL ||
+                   sdev->sdev_state == SDEV_CANCEL)
+                       continue;
                if (zsdev->port != port)
                        continue;
                /* LUN under port of interest */
index 3fce47b0b21b55142a64bb3b838bf28168ddd89e..c6acca521ffec71ee7b3f7e7231a32b18fdceff7 100644 (file)
@@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
                                 char *dbftag);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
+extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
+                                           int clear, char *dbftag);
 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
index db00b5e3abbe361143c83dc5d6becfaa0e62aac0..33eddb02ee300238897f0f9018119717b387fd58 100644 (file)
@@ -239,10 +239,6 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
        list_for_each_entry(port, &adapter->port_list, list) {
                if ((port->d_id & range) == (ntoh24(page->rscn_fid) & range))
                        zfcp_fc_test_link(port);
-               if (!port->d_id)
-                       zfcp_erp_port_reopen(port,
-                                            ZFCP_STATUS_COMMON_ERP_FAILED,
-                                            "fcrscn1");
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 }
@@ -250,6 +246,7 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
 static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
 {
        struct fsf_status_read_buffer *status_buffer = (void *)fsf_req->data;
+       struct zfcp_adapter *adapter = fsf_req->adapter;
        struct fc_els_rscn *head;
        struct fc_els_rscn_page *page;
        u16 i;
@@ -263,6 +260,22 @@ static void zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req)
        no_entries = be16_to_cpu(head->rscn_plen) /
                sizeof(struct fc_els_rscn_page);
 
+       if (no_entries > 1) {
+               /* handle failed ports */
+               unsigned long flags;
+               struct zfcp_port *port;
+
+               read_lock_irqsave(&adapter->port_list_lock, flags);
+               list_for_each_entry(port, &adapter->port_list, list) {
+                       if (port->d_id)
+                               continue;
+                       zfcp_erp_port_reopen(port,
+                                            ZFCP_STATUS_COMMON_ERP_FAILED,
+                                            "fcrscn1");
+               }
+               read_unlock_irqrestore(&adapter->port_list_lock, flags);
+       }
+
        for (i = 1; i < no_entries; i++) {
                /* skip head and start with 1st element */
                page++;
index f4f6a07c52220234fb0e865ca3f0d87a2d2fdbe0..221d0dfb849329eb5ebf1758004628301b500ba8 100644 (file)
@@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
        struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
        int ret = SUCCESS, fc_ret;
 
+       if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+               zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
+               zfcp_erp_wait(adapter);
+       }
        zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
        zfcp_erp_wait(adapter);
        fc_ret = fc_block_scsi_eh(scpnt);
index 1df5171594b89dc70087def629200c30b4149d1f..11fb68d7e60de6ed5ab388250691b647cbc030bc 100644 (file)
@@ -2640,9 +2640,14 @@ static inline unsigned int cap_to_cyls(sector_t capacity, unsigned divisor)
        return capacity;
 }
 
+static inline int aac_pci_offline(struct aac_dev *dev)
+{
+       return pci_channel_offline(dev->pdev) || dev->handle_pci_error;
+}
+
 static inline int aac_adapter_check_health(struct aac_dev *dev)
 {
-       if (unlikely(pci_channel_offline(dev->pdev)))
+       if (unlikely(aac_pci_offline(dev)))
                return -1;
 
        return (dev)->a_ops.adapter_check_health(dev);
index e67e032936ef015b66c242eaf9c3111cfb3812c2..78430a7b294c6e651024300d86aaec5eecbe53c4 100644 (file)
@@ -672,7 +672,7 @@ int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
                                        return -ETIMEDOUT;
                                }
 
-                               if (unlikely(pci_channel_offline(dev->pdev)))
+                               if (unlikely(aac_pci_offline(dev)))
                                        return -EFAULT;
 
                                if ((blink = aac_adapter_check_health(dev)) > 0) {
@@ -772,7 +772,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
 
                spin_unlock_irqrestore(&fibptr->event_lock, flags);
 
-               if (unlikely(pci_channel_offline(dev->pdev)))
+               if (unlikely(aac_pci_offline(dev)))
                        return -EFAULT;
 
                fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
index dbaa4f131433abde497c843c17e2b0e677b4544e..3ad997ac351034bd2e556a17c6f9d14addf2921a 100644 (file)
@@ -139,6 +139,7 @@ static const struct {
        { IBMVFC_FC_FAILURE, IBMVFC_VENDOR_SPECIFIC, DID_ERROR, 1, 1, "vendor specific" },
 
        { IBMVFC_FC_SCSI_ERROR, 0, DID_OK, 1, 0, "SCSI error" },
+       { IBMVFC_FC_SCSI_ERROR, IBMVFC_COMMAND_FAILED, DID_ERROR, 0, 1, "PRLI to device failed." },
 };
 
 static void ibmvfc_npiv_login(struct ibmvfc_host *);
@@ -1494,9 +1495,9 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
        if (rsp->flags & FCP_RSP_LEN_VALID)
                rsp_code = rsp->data.info.rsp_code;
 
-       scmd_printk(KERN_ERR, cmnd, "Command (%02X) failed: %s (%x:%x) "
+       scmd_printk(KERN_ERR, cmnd, "Command (%02X) : %s (%x:%x) "
                    "flags: %x fcp_rsp: %x, resid=%d, scsi_status: %x\n",
-                   cmnd->cmnd[0], err, vfc_cmd->status, vfc_cmd->error,
+                   cmnd->cmnd[0], err, be16_to_cpu(vfc_cmd->status), be16_to_cpu(vfc_cmd->error),
                    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
@@ -2022,7 +2023,7 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
                sdev_printk(KERN_ERR, sdev, "%s reset failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n", desc,
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2381,7 +2382,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
                sdev_printk(KERN_ERR, sdev, "Abort failed: %s (%x:%x) "
                            "flags: %x fcp_rsp: %x, scsi_status: %x\n",
                            ibmvfc_get_cmd_error(be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error)),
-                           rsp_iu.cmd.status, rsp_iu.cmd.error, fc_rsp->flags, rsp_code,
+                           be16_to_cpu(rsp_iu.cmd.status), be16_to_cpu(rsp_iu.cmd.error), fc_rsp->flags, rsp_code,
                            fc_rsp->scsi_status);
                rsp_rc = -EIO;
        } else
@@ -2755,16 +2756,18 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
                if (crq->format == IBMVFC_PARTITION_MIGRATED) {
                        /* We need to re-setup the interpartition connection */
-                       dev_info(vhost->dev, "Re-enabling adapter\n");
+                       dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n");
                        vhost->client_migrated = 1;
                        ibmvfc_purge_requests(vhost, DID_REQUEUE);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE);
-               } else {
-                       dev_err(vhost->dev, "Virtual adapter failed (rc=%d)\n", crq->format);
+               } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) {
+                       dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format);
                        ibmvfc_purge_requests(vhost, DID_ERROR);
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_RESET);
+               } else {
+                       dev_err(vhost->dev, "Received unknown transport event from partner (rc=%d)\n", crq->format);
                }
                return;
        case IBMVFC_CRQ_CMD_RSP:
@@ -3348,7 +3351,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error), status);
                break;
        }
 
@@ -3446,9 +3449,10 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
 
                tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)), rsp->status, rsp->error,
-                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), rsp->fc_type,
-                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), rsp->fc_explain, status);
+                       ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
+                                            be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_ls_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain), status);
                break;
        }
 
@@ -3619,7 +3623,7 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
                fc_explain = (be32_to_cpu(mad->fc_iu.response[1]) & 0x0000ff00) >> 8;
                tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                         ibmvfc_get_cmd_error(be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error)),
-                        mad->iu.status, mad->iu.error,
+                        be16_to_cpu(mad->iu.status), be16_to_cpu(mad->iu.error),
                         ibmvfc_get_fc_type(fc_reason), fc_reason,
                         ibmvfc_get_ls_explain(fc_explain), fc_explain, status);
                break;
@@ -3831,9 +3835,10 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
 
                tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
                        ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                       rsp->status, rsp->error, ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)),
-                       rsp->fc_type, ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)),
-                       rsp->fc_explain, status);
+                       be16_to_cpu(rsp->status), be16_to_cpu(rsp->error),
+                       ibmvfc_get_fc_type(be16_to_cpu(rsp->fc_type)), be16_to_cpu(rsp->fc_type),
+                       ibmvfc_get_gs_explain(be16_to_cpu(rsp->fc_explain)), be16_to_cpu(rsp->fc_explain),
+                       status);
                break;
        }
 
@@ -3959,7 +3964,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
                level += ibmvfc_retry_host_init(vhost);
                ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                          rsp->status, rsp->error);
+                          be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                break;
        case IBMVFC_MAD_DRIVER_FAILED:
                break;
@@ -4024,7 +4029,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
                ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
                           ibmvfc_get_cmd_error(be16_to_cpu(rsp->status), be16_to_cpu(rsp->error)),
-                                               rsp->status, rsp->error);
+                                               be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
                ibmvfc_free_event(evt);
                return;
        case IBMVFC_MAD_CRQ_ERROR:
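Most of the ibmvfc changes above fix log statements that printed rsp->status, rsp->error, fc_type and fc_explain raw even though these fields are big-endian on the wire; the values are now passed through be16_to_cpu() before formatting, matching what the lookup helpers already did. A minimal hedged sketch of the pattern; my_rsp and log_rsp are invented for illustration:

#include <linux/types.h>
#include <linux/printk.h>
#include <asm/byteorder.h>

struct my_rsp {
        __be16 status;
        __be16 error;
};

static void log_rsp(const struct my_rsp *rsp)
{
        /* convert wire-endian fields before printing them */
        pr_err("request failed (%x:%x)\n",
               be16_to_cpu(rsp->status), be16_to_cpu(rsp->error));
}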
index b81a53c4a9a8b1020a96a85fd5e84cde2adb9900..459cc288ba1d01abe63c28454bf73c7190bb64a4 100644 (file)
@@ -78,9 +78,14 @@ enum ibmvfc_crq_valid {
        IBMVFC_CRQ_XPORT_EVENT          = 0xFF,
 };
 
-enum ibmvfc_crq_format {
+enum ibmvfc_crq_init_msg {
        IBMVFC_CRQ_INIT                 = 0x01,
        IBMVFC_CRQ_INIT_COMPLETE        = 0x02,
+};
+
+enum ibmvfc_crq_xport_evts {
+       IBMVFC_PARTNER_FAILED           = 0x01,
+       IBMVFC_PARTNER_DEREGISTER       = 0x02,
        IBMVFC_PARTITION_MIGRATED       = 0x06,
 };
 
index e57774472e752013ce762912a6ceec512905fc2a..1d8c584ec1e9197595acf2baa61bccae4305b646 100644 (file)
@@ -3281,12 +3281,18 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 
        if (smid < ioc->hi_priority_smid) {
                struct scsiio_tracker *st;
+               void *request;
 
                st = _get_st_from_smid(ioc, smid);
                if (!st) {
                        _base_recovery_check(ioc);
                        return;
                }
+
+               /* Clear MPI request frame */
+               request = mpt3sas_base_get_msg_frame(ioc, smid);
+               memset(request, 0, ioc->request_sz);
+
                mpt3sas_base_clear_st(ioc, st);
                _base_recovery_check(ioc);
                return;
index 8bb5b8f9f4d2cdbbc127c73cda4d9672b4adcf77..1ccfbc7eebe0323ce88b1c450e52bb87aba3c45e 100644 (file)
@@ -1462,11 +1462,23 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 {
        struct scsi_cmnd *scmd = NULL;
        struct scsiio_tracker *st;
+       Mpi25SCSIIORequest_t *mpi_request;
 
        if (smid > 0  &&
            smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
                u32 unique_tag = smid - 1;
 
+               mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+               /*
+                * If SCSI IO request is outstanding at driver level then
+                * DevHandle field must be non-zero. If DevHandle is zero
+                * then it means that this smid is free at driver level,
+                * so return NULL.
+                */
+               if (!mpi_request->DevHandle)
+                       return scmd;
+
                scmd = scsi_host_find_tag(ioc->shost, unique_tag);
                if (scmd) {
                        st = scsi_cmd_priv(scmd);
index 16a18d5d856f91725b33e25df042eb21bba8c20a..6e4f4931ae175f806731d2fcb1fbb4ba655cc885 100644 (file)
@@ -3203,6 +3203,8 @@ static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session,
        if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
                return -EINVAL;
        ep = iscsi_lookup_endpoint(transport_fd);
+       if (!ep)
+               return -EINVAL;
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
        qla_conn->qla_ep = ep->dd_data;
index 6a9040faed00c93ba5beeda77ffb5b2cbcb6c07e..3b119ca0cc0ce9ba2cfcc95cf78307a96b1d264b 100644 (file)
@@ -771,6 +771,12 @@ store_state_field(struct device *dev, struct device_attribute *attr,
 
        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, state);
+       /*
+        * If the device state changes to SDEV_RUNNING, we need to run
+        * the queue to avoid an I/O hang.
+        */
+       if (ret == 0 && state == SDEV_RUNNING)
+               blk_mq_run_hw_queues(sdev->request_queue, true);
        mutex_unlock(&sdev->state_mutex);
 
        return ret == 0 ? count : -EINVAL;
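store_state_field() now kicks the blk-mq hardware queues once a device transitions back to SDEV_RUNNING, since requests may have been left queued while the device was not running. A hedged sketch of the restart-after-state-change pattern; my_set_running() is an invented helper:

#include <linux/blk-mq.h>
#include <scsi/scsi_device.h>

static int my_set_running(struct scsi_device *sdev)
{
        int ret;

        mutex_lock(&sdev->state_mutex);
        ret = scsi_device_set_state(sdev, SDEV_RUNNING);
        if (ret == 0)
                /* async run, so we do not block with the mutex held */
                blk_mq_run_hw_queues(sdev->request_queue, true);
        mutex_unlock(&sdev->state_mutex);
        return ret;
}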
index 251db30d0882dc83556a688798c4f277072edefe..2b2bc4b49d78a36c737cd9e70666b900ec0fc2b2 100644 (file)
@@ -1415,11 +1415,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
                        scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
        }
 
-       /*
-        * XXX and what if there are packets in flight and this close()
-        * XXX is followed by a "rmmod sd_mod"?
-        */
-
        scsi_disk_put(sdkp);
 }
 
@@ -3076,6 +3071,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
        unsigned int opt_xfer_bytes =
                logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
 
+       if (sdkp->opt_xfer_blocks == 0)
+               return false;
+
        if (sdkp->opt_xfer_blocks > dev_max) {
                sd_first_printk(KERN_WARNING, sdkp,
                                "Optimal transfer size %u logical blocks " \
@@ -3505,9 +3503,21 @@ static void scsi_disk_release(struct device *dev)
 {
        struct scsi_disk *sdkp = to_scsi_disk(dev);
        struct gendisk *disk = sdkp->disk;
-       
+       struct request_queue *q = disk->queue;
+
        ida_free(&sd_index_ida, sdkp->index);
 
+       /*
+        * Wait until all requests that are in progress have completed.
+        * This is necessary to prevent e.g. scsi_end_request() from
+        * crashing once disk->private_data has been cleared. Wait from
+        * inside scsi_disk_release() instead of from sd_release() so that
+        * freezing and unfreezing the request queue does not affect user
+        * space I/O in case multiple processes open a /dev/sd... node
+        * concurrently.
+        */
+       blk_mq_freeze_queue(q);
+       blk_mq_unfreeze_queue(q);
+
        disk->private_data = NULL;
        put_disk(disk);
        put_device(&sdkp->device->sdev_gendev);
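scsi_disk_release() now freezes and immediately unfreezes the request queue; blk_mq_freeze_queue() returns only once every in-flight request has completed, so the pair behaves as a drain barrier before disk->private_data is cleared. A hedged sketch of that drain-then-clear idea; my_release() is an invented function:

#include <linux/blk-mq.h>
#include <linux/genhd.h>

static void my_release(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        /* wait until all requests in flight have finished */
        blk_mq_freeze_queue(q);
        blk_mq_unfreeze_queue(q);

        disk->private_data = NULL;
        put_disk(disk);
}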
index 9351349cf0a930cd5c25dedd6bb747970e455e96..1e0041ec813238cbfa7ab52c3fdc9799961169d1 100644 (file)
@@ -150,7 +150,12 @@ struct bcm2835_power {
 
 static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Enable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) & ~ASB_REQ_STOP);
@@ -165,7 +170,12 @@ static int bcm2835_asb_enable(struct bcm2835_power *power, u32 reg)
 
 static int bcm2835_asb_disable(struct bcm2835_power *power, u32 reg)
 {
-       u64 start = ktime_get_ns();
+       u64 start;
+
+       if (!reg)
+               return 0;
+
+       start = ktime_get_ns();
 
        /* Disable the module's async AXI bridges. */
        ASB_WRITE(reg, ASB_READ(reg) | ASB_REQ_STOP);
@@ -475,7 +485,7 @@ static int bcm2835_power_pd_power_off(struct generic_pm_domain *domain)
        }
 }
 
-static void
+static int
 bcm2835_init_power_domain(struct bcm2835_power *power,
                          int pd_xlate_index, const char *name)
 {
@@ -483,6 +493,17 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        struct bcm2835_power_domain *dom = &power->domains[pd_xlate_index];
 
        dom->clk = devm_clk_get(dev->parent, name);
+       if (IS_ERR(dom->clk)) {
+               int ret = PTR_ERR(dom->clk);
+
+               if (ret == -EPROBE_DEFER)
+                       return ret;
+
+               /* Some domains don't have a clk, so make sure that we
+                * don't deref an error pointer later.
+                */
+               dom->clk = NULL;
+       }
 
        dom->base.name = name;
        dom->base.power_on = bcm2835_power_pd_power_on;
@@ -495,6 +516,8 @@ bcm2835_init_power_domain(struct bcm2835_power *power,
        pm_genpd_init(&dom->base, NULL, true);
 
        power->pd_xlate.domains[pd_xlate_index] = &dom->base;
+
+       return 0;
 }
 
 /** bcm2835_reset_reset - Resets a block that has a reset line in the
@@ -592,7 +615,7 @@ static int bcm2835_power_probe(struct platform_device *pdev)
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM0 },
                { BCM2835_POWER_DOMAIN_IMAGE_PERI, BCM2835_POWER_DOMAIN_CAM1 },
        };
-       int ret, i;
+       int ret = 0, i;
        u32 id;
 
        power = devm_kzalloc(dev, sizeof(*power), GFP_KERNEL);
@@ -619,8 +642,11 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        power->pd_xlate.num_domains = ARRAY_SIZE(power_domain_names);
 
-       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++)
-               bcm2835_init_power_domain(power, i, power_domain_names[i]);
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               ret = bcm2835_init_power_domain(power, i, power_domain_names[i]);
+               if (ret)
+                       goto fail;
+       }
 
        for (i = 0; i < ARRAY_SIZE(domain_deps); i++) {
                pm_genpd_add_subdomain(&power->domains[domain_deps[i].parent].base,
@@ -634,12 +660,21 @@ static int bcm2835_power_probe(struct platform_device *pdev)
 
        ret = devm_reset_controller_register(dev, &power->reset);
        if (ret)
-               return ret;
+               goto fail;
 
        of_genpd_add_provider_onecell(dev->parent->of_node, &power->pd_xlate);
 
        dev_info(dev, "Broadcom BCM2835 power domains driver");
        return 0;
+
+fail:
+       for (i = 0; i < ARRAY_SIZE(power_domain_names); i++) {
+               struct generic_pm_domain *dom = &power->domains[i].base;
+
+               if (dom->name)
+                       pm_genpd_remove(dom);
+       }
+       return ret;
 }
 
 static int bcm2835_power_remove(struct platform_device *pdev)
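bcm2835_init_power_domain() now checks the devm_clk_get() result: -EPROBE_DEFER is propagated so the probe can be retried once the clock provider shows up, while any other error is treated as "this domain has no clock" and the pointer is set to NULL so later clock calls do not dereference an error pointer. A hedged sketch of that optional-clock handling; my_init_domain() is an invented helper:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int my_init_domain(struct device *dev, struct clk **clkp,
                          const char *name)
{
        struct clk *clk = devm_clk_get(dev, name);

        if (IS_ERR(clk)) {
                if (PTR_ERR(clk) == -EPROBE_DEFER)
                        return -EPROBE_DEFER;
                clk = NULL;     /* no clock for this domain is acceptable */
        }
        *clkp = clk;
        return 0;
}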
index c0901b96cfe44850f6b6e580d95432b20e4a2d89..62951e836cbc879d1e4be6ba158a8230ebec2c52 100644 (file)
@@ -114,8 +114,6 @@ source "drivers/staging/ralink-gdma/Kconfig"
 
 source "drivers/staging/mt7621-mmc/Kconfig"
 
-source "drivers/staging/mt7621-eth/Kconfig"
-
 source "drivers/staging/mt7621-dts/Kconfig"
 
 source "drivers/staging/gasket/Kconfig"
index 57c6bce13ff4bff0c3487315835c2e1d6b80832e..d1b17ddcd354de10c68455bc64e802c101a59e7d 100644 (file)
@@ -47,7 +47,6 @@ obj-$(CONFIG_SPI_MT7621)      += mt7621-spi/
 obj-$(CONFIG_SOC_MT7621)       += mt7621-dma/
 obj-$(CONFIG_DMA_RALINK)       += ralink-gdma/
 obj-$(CONFIG_MTK_MMC)          += mt7621-mmc/
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING) += mt7621-eth/
 obj-$(CONFIG_SOC_MT7621)       += mt7621-dts/
 obj-$(CONFIG_STAGING_GASKET_FRAMEWORK) += gasket/
 obj-$(CONFIG_XIL_AXIS_FIFO)    += axis-fifo/
index 687537203d9cfba144fba1dc846d3cc410776b08..d9725888af6fc34045806fbb5c91ca372b7c9a46 100644 (file)
@@ -3,6 +3,7 @@
 #
 config XIL_AXIS_FIFO
        tristate "Xilinx AXI-Stream FIFO IP core driver"
+       depends on OF
        default n
        help
          This adds support for the Xilinx AXI-Stream
index a7d569cfca5db6b613e31d54eecb48e08f96413e..0dff1ac057cdeb0185cc67357213707aadf5c0cf 100644 (file)
@@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
                           unsigned int mask);
 unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
                                     unsigned int *data);
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+                                      struct comedi_cmd *cmd);
 unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
 unsigned int comedi_nscans_left(struct comedi_subdevice *s,
                                unsigned int nscans);
index eefa62f42c0f06d8b84e03379c0499d7b66d8ace..5a32b8fc000e3df08409028c9ffa5ff979d4efec 100644 (file)
@@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
 EXPORT_SYMBOL_GPL(comedi_dio_update_state);
 
 /**
- * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
+ * bytes
  * @s: COMEDI subdevice.
+ * @cmd: COMEDI command.
  *
  * Determines the overall scan length according to the subdevice type and the
- * number of channels in the scan.
+ * number of channels in the scan for the specified command.
  *
  * For digital input, output or input/output subdevices, samples for
  * multiple channels are assumed to be packed into one or more unsigned
@@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
  *
  * Returns the overall scan length in bytes.
  */
-unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
+                                      struct comedi_cmd *cmd)
 {
-       struct comedi_cmd *cmd = &s->async->cmd;
        unsigned int num_samples;
        unsigned int bits_per_sample;
 
@@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
        }
        return comedi_samples_to_bytes(s, num_samples);
 }
+EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
+
+/**
+ * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
+ * @s: COMEDI subdevice.
+ *
+ * Determines the overall scan length according to the subdevice type and the
+ * number of channels in the scan for the current command.
+ *
+ * For digital input, output or input/output subdevices, samples for
+ * multiple channels are assumed to be packed into one or more unsigned
+ * short or unsigned int values according to the subdevice's %SDF_LSAMPL
+ * flag.  For other types of subdevice, samples are assumed to occupy a
+ * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
+ *
+ * Returns the overall scan length in bytes.
+ */
+unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
+{
+       struct comedi_cmd *cmd = &s->async->cmd;
+
+       return comedi_bytes_per_scan_cmd(s, cmd);
+}
 EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
 
 static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
index 5edf59ac6706d3b5cd7d23d0f945895dc1cb8f48..b04dad8c70927a0aa52229393c063adce4b32e37 100644 (file)
@@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
                           struct comedi_subdevice *s, struct comedi_cmd *cmd)
 {
        struct ni_private *devpriv = dev->private;
+       unsigned int bytes_per_scan;
        int err = 0;
 
        /* Step 1 : check if triggers are trivially valid */
@@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
        err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
        err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
                                           cmd->chanlist_len);
-       err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
-                                           s->async->prealloc_bufsz /
-                                           comedi_bytes_per_scan(s));
+       bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
+       if (bytes_per_scan) {
+               err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
+                                                   s->async->prealloc_bufsz /
+                                                   bytes_per_scan);
+       }
 
        if (err)
                return 3;
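ni_cdio_cmdtest() now derives the scan length from the command being validated and only divides the preallocated buffer size by it when it is non-zero, avoiding a division by zero for a command without channels. A tiny hedged sketch of the guard; the names are invented:

static unsigned int max_scans(unsigned int bufsz,
                              unsigned int bytes_per_scan)
{
        /* a command that is still being validated may have no channels */
        if (!bytes_per_scan)
                return 0;
        return bufsz / bytes_per_scan;
}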
index 829f7b12e0dcf4aa3ee34a5315f6d5f52b7087ec..9bbc68729c11052018c26335d6b88498e3491b32 100644 (file)
@@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
        [EROFS_FT_SYMLINK]      = DT_LNK,
 };
 
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+                            unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+       /* since the on-disk name is not necessarily '\0'-terminated */
+       unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+       memcpy(dbg_namebuf, de_name, de_namelen);
+       dbg_namebuf[de_namelen] = '\0';
+
+       debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
+               de_namelen, d_type);
+#endif
+}
+
 static int erofs_fill_dentries(struct dir_context *ctx,
                               void *dentry_blk, unsigned int *ofs,
                               unsigned int nameoff, unsigned int maxsize)
@@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
        de = dentry_blk + *ofs;
        while (de < end) {
                const char *de_name;
-               int de_namelen;
+               unsigned int de_namelen;
                unsigned char d_type;
-#ifdef CONFIG_EROFS_FS_DEBUG
-               unsigned int dbg_namelen;
-               unsigned char dbg_namebuf[EROFS_NAME_LEN];
-#endif
 
-               if (unlikely(de->file_type < EROFS_FT_MAX))
+               if (de->file_type < EROFS_FT_MAX)
                        d_type = erofs_filetype_table[de->file_type];
                else
                        d_type = DT_UNKNOWN;
@@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
                nameoff = le16_to_cpu(de->nameoff);
                de_name = (char *)dentry_blk + nameoff;
 
-               de_namelen = unlikely(de + 1 >= end) ?
-                       /* last directory entry */
-                       strnlen(de_name, maxsize - nameoff) :
-                       le16_to_cpu(de[1].nameoff) - nameoff;
+               /* the last dirent in the block? */
+               if (de + 1 >= end)
+                       de_namelen = strnlen(de_name, maxsize - nameoff);
+               else
+                       de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
 
                /* a corrupted entry is found */
-               if (unlikely(de_namelen < 0)) {
+               if (unlikely(nameoff + de_namelen > maxsize ||
+                            de_namelen > EROFS_NAME_LEN)) {
                        DBG_BUGON(1);
                        return -EIO;
                }
 
-#ifdef CONFIG_EROFS_FS_DEBUG
-               dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
-               memcpy(dbg_namebuf, de_name, dbg_namelen);
-               dbg_namebuf[dbg_namelen] = '\0';
-
-               debugln("%s, found de_name %s de_len %d d_type %d", __func__,
-                       dbg_namebuf, de_namelen, d_type);
-#endif
-
+               debug_one_dentry(d_type, de_name, de_namelen);
                if (!dir_emit(ctx, de_name, de_namelen,
                              le64_to_cpu(de->nid), d_type))
                        /* stopped by some reason */
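The dir.c hunk replaces the old negative-length test, which can never fire once de_namelen is unsigned, with an explicit bounds check: the name must end inside the block (nameoff + de_namelen <= maxsize) and must not exceed EROFS_NAME_LEN. A hedged sketch of that validation; name_ok() is invented and MY_NAME_MAX is an assumed stand-in for EROFS_NAME_LEN:

#include <linux/types.h>

#define MY_NAME_MAX 255

static bool name_ok(unsigned int nameoff, unsigned int namelen,
                    unsigned int maxsize)
{
        /* reject names that run past the block or exceed the limit */
        return nameoff + namelen <= maxsize && namelen <= MY_NAME_MAX;
}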
index 8715bc50e09c16d44ece32baa474eb7d9bc5ab8f..31eef839577436709b1a5261507aff59bcf821d7 100644 (file)
@@ -972,6 +972,7 @@ repeat:
        overlapped = false;
        compressed_pages = grp->compressed_pages;
 
+       err = 0;
        for (i = 0; i < clusterpages; ++i) {
                unsigned int pagenr;
 
@@ -981,26 +982,39 @@ repeat:
                DBG_BUGON(!page);
                DBG_BUGON(!page->mapping);
 
-               if (z_erofs_is_stagingpage(page))
-                       continue;
+               if (!z_erofs_is_stagingpage(page)) {
 #ifdef EROFS_FS_HAS_MANAGED_CACHE
-               if (page->mapping == MNGD_MAPPING(sbi)) {
-                       DBG_BUGON(!PageUptodate(page));
-                       continue;
-               }
+                       if (page->mapping == MNGD_MAPPING(sbi)) {
+                               if (unlikely(!PageUptodate(page)))
+                                       err = -EIO;
+                               continue;
+                       }
 #endif
 
-               /* only non-head page could be reused as a compressed page */
-               pagenr = z_erofs_onlinepage_index(page);
+                       /*
+                        * only non-head pages can be selected
+                        * for in-place decompression
+                        */
+                       pagenr = z_erofs_onlinepage_index(page);
 
-               DBG_BUGON(pagenr >= nr_pages);
-               DBG_BUGON(pages[pagenr]);
-               ++sparsemem_pages;
-               pages[pagenr] = page;
+                       DBG_BUGON(pagenr >= nr_pages);
+                       DBG_BUGON(pages[pagenr]);
+                       ++sparsemem_pages;
+                       pages[pagenr] = page;
 
-               overlapped = true;
+                       overlapped = true;
+               }
+
+               /* PG_error needs checking for in-place and staging pages */
+               if (unlikely(PageError(page))) {
+                       DBG_BUGON(PageUptodate(page));
+                       err = -EIO;
+               }
        }
 
+       if (unlikely(err))
+               goto out;
+
        llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
 
        if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
@@ -1029,6 +1043,10 @@ repeat:
 
 skip_allocpage:
        vout = erofs_vmap(pages, nr_pages);
+       if (!vout) {
+               err = -ENOMEM;
+               goto out;
+       }
 
        err = z_erofs_vle_unzip_vmap(compressed_pages,
                clusterpages, vout, llen, work->pageofs, overlapped);
@@ -1194,6 +1212,7 @@ repeat:
        if (page->mapping == mc) {
                WRITE_ONCE(grp->compressed_pages[nr], page);
 
+               ClearPageError(page);
                if (!PagePrivate(page)) {
                        /*
                         * impossible to be !PagePrivate(page) for
index 48b263a2731aad2edd28f19f9df3fcc8e461bc1c..0daac9b984a8ec82207ca4e53da9a201a4204707 100644 (file)
@@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
 
        nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
 
-       if (clusterpages == 1)
+       if (clusterpages == 1) {
                vin = kmap_atomic(compressed_pages[0]);
-       else
+       } else {
                vin = erofs_vmap(compressed_pages, clusterpages);
+               if (!vin)
+                       return -ENOMEM;
+       }
 
        preempt_disable();
        vout = erofs_pcpubuf[smp_processor_id()].data;
index b733855402168efffa6a8a192a3d21e5d3e0ff60..250c15ace2a71147be4e6f14e522b6a3e898745e 100644 (file)
        status = "okay";
 };
 
-&ethernet {
-       //mtd-mac-address = <&factory 0xe000>;
-       gmac1: mac@0 {
-               compatible = "mediatek,eth-mac";
-               reg = <0>;
-               phy-handle = <&phy1>;
-       };
-
-       mdio-bus {
-               phy1: ethernet-phy@1 {
-                       reg = <1>;
-                       phy-mode = "rgmii";
-               };
-       };
-};
-
 &pinctrl {
        state_default: pinctrl0 {
                gpio {
                };
        };
 };
+
+&switch0 {
+       ports {
+               port@0 {
+                       label = "ethblack";
+                       status = "ok";
+               };
+               port@4 {
+                       label = "ethblue";
+                       status = "ok";
+               };
+       };
+};
index 6aff3680ce4b6b4574247ffc564b2fe922e5a544..17020e24abd294055b321985c6806904eb1e8e81 100644 (file)
 
                mediatek,ethsys = <&ethsys>;
 
-               mediatek,switch = <&gsw>;
 
+               gmac0: mac@0 {
+                       compatible = "mediatek,eth-mac";
+                       reg = <0>;
+                       phy-mode = "rgmii";
+                       fixed-link {
+                               speed = <1000>;
+                               full-duplex;
+                               pause;
+                       };
+               };
+               gmac1: mac@1 {
+                       compatible = "mediatek,eth-mac";
+                       reg = <1>;
+                       status = "off";
+                       phy-mode = "rgmii";
+                       phy-handle = <&phy5>;
+               };
                mdio-bus {
                        #address-cells = <1>;
                        #size-cells = <0>;
 
-                       phy1f: ethernet-phy@1f {
-                               reg = <0x1f>;
+                       phy5: ethernet-phy@5 {
+                               reg = <5>;
                                phy-mode = "rgmii";
                        };
+
+                       switch0: switch0@0 {
+                               compatible = "mediatek,mt7621";
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               reg = <0>;
+                               mediatek,mcm;
+                               resets = <&rstctrl 2>;
+                               reset-names = "mcm";
+
+                               ports {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       reg = <0>;
+                                       port@0 {
+                                               status = "off";
+                                               reg = <0>;
+                                               label = "lan0";
+                                       };
+                                       port@1 {
+                                               status = "off";
+                                               reg = <1>;
+                                               label = "lan1";
+                                       };
+                                       port@2 {
+                                               status = "off";
+                                               reg = <2>;
+                                               label = "lan2";
+                                       };
+                                       port@3 {
+                                               status = "off";
+                                               reg = <3>;
+                                               label = "lan3";
+                                       };
+                                       port@4 {
+                                               status = "off";
+                                               reg = <4>;
+                                               label = "lan4";
+                                       };
+                                       port@6 {
+                                               reg = <6>;
+                                               label = "cpu";
+                                               ethernet = <&gmac0>;
+                                               phy-mode = "trgmii";
+                                               fixed-link {
+                                                       speed = <1000>;
+                                                       full-duplex;
+                                               };
+                                       };
+                               };
+                       };
                };
        };
 
diff --git a/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt b/drivers/staging/mt7621-eth/Documentation/devicetree/bindings/net/mediatek-net-gsw.txt
deleted file mode 100644 (file)
index 596b385..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs.
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw", "mediatek,mt7621-gsw",
-  "mediatek,mt7623-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-
-
-Additional required properties for ARM based SoCs:
-- mediatek,reset-pin: phandle describing the reset GPIO
-- clocks: the clocks used by the switch
-- clock-names: the names of the clocks listed in the clocks property
-  these should be "trgpll", "esw", "gp2", "gp1"
-- mt7530-supply: the phandle of the regulator used to power the switch
-- mediatek,pctl-regmap: phandle to the port control regmap. this is used to
-  setup the drive current
-
-
-Optional properties:
-- interrupt-parent: Should be the phandle for the interrupt controller
-  that services interrupts for this device
-
-Example:
-
-gsw: switch@1b100000 {
-       compatible = "mediatek,mt7623-gsw";
-       reg = <0 0x1b110000 0 0x300000>;
-
-       interrupt-parent = <&pio>;
-       interrupts = <168 IRQ_TYPE_EDGE_RISING>;
-
-       clocks = <&apmixedsys CLK_APMIXED_TRGPLL>,
-                <&ethsys CLK_ETHSYS_ESW>,
-                <&ethsys CLK_ETHSYS_GP2>,
-                <&ethsys CLK_ETHSYS_GP1>;
-       clock-names = "trgpll", "esw", "gp2", "gp1";
-
-       mt7530-supply = <&mt6323_vpa_reg>;
-
-       mediatek,pctl-regmap = <&syscfg_pctl_a>;
-       mediatek,reset-pin = <&pio 15 0>;
-
-       status = "okay";
-};
diff --git a/drivers/staging/mt7621-eth/Kconfig b/drivers/staging/mt7621-eth/Kconfig
deleted file mode 100644 (file)
index 44ea86c..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-config NET_VENDOR_MEDIATEK_STAGING
-       bool "MediaTek ethernet driver - staging version"
-       depends on RALINK
-       ---help---
-         If you have an MT7621 Mediatek SoC with ethernet, say Y.
-
-if NET_VENDOR_MEDIATEK_STAGING
-choice
-       prompt "MAC type"
-
-config NET_MEDIATEK_MT7621
-       bool "MT7621"
-       depends on MIPS && SOC_MT7621
-
-endchoice
-
-config NET_MEDIATEK_SOC_STAGING
-       tristate "MediaTek SoC Gigabit Ethernet support"
-       depends on NET_VENDOR_MEDIATEK_STAGING
-       select PHYLIB
-       ---help---
-         This driver supports the gigabit ethernet MACs in the
-         MediaTek SoC family.
-
-config NET_MEDIATEK_MDIO
-       def_bool NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-       select PHYLIB
-
-config NET_MEDIATEK_MDIO_MT7620
-       def_bool NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-       select NET_MEDIATEK_MDIO
-
-config NET_MEDIATEK_GSW_MT7621
-       def_tristate NET_MEDIATEK_SOC_STAGING
-       depends on NET_MEDIATEK_MT7621
-
-endif #NET_VENDOR_MEDIATEK_STAGING
diff --git a/drivers/staging/mt7621-eth/Makefile b/drivers/staging/mt7621-eth/Makefile
deleted file mode 100644 (file)
index 018bcc3..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#
-# Makefile for the Ralink SoCs built-in ethernet macs
-#
-
-mtk-eth-soc-y                                  += mtk_eth_soc.o ethtool.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO)                += mdio.o
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MDIO_MT7620) += mdio_mt7620.o
-
-mtk-eth-soc-$(CONFIG_NET_MEDIATEK_MT7621)      += soc_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_GSW_MT7621)          += gsw_mt7621.o
-
-obj-$(CONFIG_NET_MEDIATEK_SOC_STAGING)         += mtk-eth-soc.o
diff --git a/drivers/staging/mt7621-eth/TODO b/drivers/staging/mt7621-eth/TODO
deleted file mode 100644 (file)
index f9e47d4..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-
-- verify devicetree documentation is consistent with code
-- fix ethtool - currently doesn't return valid data.
-- general code review and clean up
-- add support for second MAC on mt7621
-- convert gsw code to use switchdev interfaces
-- md7620_mmi_write etc should probably be wrapped
-  in a regmap abstraction.
-- Get soc_mt7621 to work with QDMA TX if possible.
-- Ensure phys are correctly configured when a cable
-  is plugged in.
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-eth/ethtool.c b/drivers/staging/mt7621-eth/ethtool.c
deleted file mode 100644 (file)
index 8c4228e..0000000
+++ /dev/null
@@ -1,250 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include "mtk_eth_soc.h"
-#include "ethtool.h"
-
-struct mtk_stat {
-       char name[ETH_GSTRING_LEN];
-       unsigned int idx;
-};
-
-#define MTK_HW_STAT(stat) { \
-       .name = #stat, \
-       .idx = offsetof(struct mtk_hw_stats, stat) / sizeof(u64) \
-}
-
-static const struct mtk_stat mtk_ethtool_hw_stats[] = {
-       MTK_HW_STAT(tx_bytes),
-       MTK_HW_STAT(tx_packets),
-       MTK_HW_STAT(tx_skip),
-       MTK_HW_STAT(tx_collisions),
-       MTK_HW_STAT(rx_bytes),
-       MTK_HW_STAT(rx_packets),
-       MTK_HW_STAT(rx_overflow),
-       MTK_HW_STAT(rx_fcs_errors),
-       MTK_HW_STAT(rx_short_errors),
-       MTK_HW_STAT(rx_long_errors),
-       MTK_HW_STAT(rx_checksum_errors),
-       MTK_HW_STAT(rx_flow_control_packets),
-};
-
-#define MTK_HW_STATS_LEN       ARRAY_SIZE(mtk_ethtool_hw_stats)
-
-static int mtk_get_link_ksettings(struct net_device *dev,
-                                 struct ethtool_link_ksettings *cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       int err;
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-               err = phy_read_status(mac->phy_dev);
-               if (err)
-                       return -ENODEV;
-       }
-
-       phy_ethtool_ksettings_get(mac->phy_dev, cmd);
-       return 0;
-}
-
-static int mtk_set_link_ksettings(struct net_device *dev,
-                                 const struct ethtool_link_ksettings *cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       if (cmd->base.phy_address != mac->phy_dev->mdio.addr) {
-               if (mac->hw->phy->phy_node[cmd->base.phy_address]) {
-                       mac->phy_dev = mac->hw->phy->phy[cmd->base.phy_address];
-                       mac->phy_flags = MTK_PHY_FLAG_PORT;
-               } else if (mac->hw->mii_bus) {
-                       mac->phy_dev = mdiobus_get_phy(mac->hw->mii_bus,
-                                                      cmd->base.phy_address);
-                       if (!mac->phy_dev)
-                               return -ENODEV;
-                       mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-               } else {
-                       return -ENODEV;
-               }
-       }
-
-       return phy_ethtool_ksettings_set(mac->phy_dev, cmd);
-}
-
-static void mtk_get_drvinfo(struct net_device *dev,
-                           struct ethtool_drvinfo *info)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_soc_data *soc = mac->hw->soc;
-
-       strlcpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
-       strlcpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
-
-       if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE])
-               info->n_stats = MTK_HW_STATS_LEN;
-}
-
-static u32 mtk_get_msglevel(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       return mac->hw->msg_enable;
-}
-
-static void mtk_set_msglevel(struct net_device *dev, u32 value)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       mac->hw->msg_enable = value;
-}
-
-static int mtk_nway_reset(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -EOPNOTSUPP;
-
-       return genphy_restart_aneg(mac->phy_dev);
-}
-
-static u32 mtk_get_link(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       int err;
-
-       if (!mac->phy_dev)
-               goto out_get_link;
-
-       if (mac->phy_flags == MTK_PHY_FLAG_ATTACH) {
-               err = genphy_update_link(mac->phy_dev);
-               if (err)
-                       goto out_get_link;
-       }
-
-       return mac->phy_dev->link;
-
-out_get_link:
-       return ethtool_op_get_link(dev);
-}
-
-static int mtk_set_ringparam(struct net_device *dev,
-                            struct ethtool_ringparam *ring)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if ((ring->tx_pending < 2) ||
-           (ring->rx_pending < 2) ||
-           (ring->rx_pending > mac->hw->soc->dma_ring_size) ||
-           (ring->tx_pending > mac->hw->soc->dma_ring_size))
-               return -EINVAL;
-
-       dev->netdev_ops->ndo_stop(dev);
-
-       mac->hw->tx_ring.tx_ring_size = BIT(fls(ring->tx_pending) - 1);
-       mac->hw->rx_ring[0].rx_ring_size = BIT(fls(ring->rx_pending) - 1);
-
-       return dev->netdev_ops->ndo_open(dev);
-}
-
-static void mtk_get_ringparam(struct net_device *dev,
-                             struct ethtool_ringparam *ring)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       ring->rx_max_pending = mac->hw->soc->dma_ring_size;
-       ring->tx_max_pending = mac->hw->soc->dma_ring_size;
-       ring->rx_pending = mac->hw->rx_ring[0].rx_ring_size;
-       ring->tx_pending = mac->hw->tx_ring.tx_ring_size;
-}
-
-static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
-{
-       int i;
-
-       switch (stringset) {
-       case ETH_SS_STATS:
-               for (i = 0; i < MTK_HW_STATS_LEN; i++) {
-                       memcpy(data, mtk_ethtool_hw_stats[i].name,
-                              ETH_GSTRING_LEN);
-                       data += ETH_GSTRING_LEN;
-               }
-               break;
-       }
-}
-
-static int mtk_get_sset_count(struct net_device *dev, int sset)
-{
-       switch (sset) {
-       case ETH_SS_STATS:
-               return MTK_HW_STATS_LEN;
-       default:
-               return -EOPNOTSUPP;
-       }
-}
-
-static void mtk_get_ethtool_stats(struct net_device *dev,
-                                 struct ethtool_stats *stats, u64 *data)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_hw_stats *hwstats = mac->hw_stats;
-       unsigned int start;
-       int i;
-
-       if (netif_running(dev) && netif_device_present(dev)) {
-               if (spin_trylock(&hwstats->stats_lock)) {
-                       mtk_stats_update_mac(mac);
-                       spin_unlock(&hwstats->stats_lock);
-               }
-       }
-
-       do {
-               start = u64_stats_fetch_begin_irq(&hwstats->syncp);
-               for (i = 0; i < MTK_HW_STATS_LEN; i++)
-                       data[i] = ((u64 *)hwstats)[mtk_ethtool_hw_stats[i].idx];
-
-       } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start));
-}
-
-static struct ethtool_ops mtk_ethtool_ops = {
-       .get_link_ksettings     = mtk_get_link_ksettings,
-       .set_link_ksettings     = mtk_set_link_ksettings,
-       .get_drvinfo            = mtk_get_drvinfo,
-       .get_msglevel           = mtk_get_msglevel,
-       .set_msglevel           = mtk_set_msglevel,
-       .nway_reset             = mtk_nway_reset,
-       .get_link               = mtk_get_link,
-       .set_ringparam          = mtk_set_ringparam,
-       .get_ringparam          = mtk_get_ringparam,
-};
-
-void mtk_set_ethtool_ops(struct net_device *netdev)
-{
-       struct mtk_mac *mac = netdev_priv(netdev);
-       struct mtk_soc_data *soc = mac->hw->soc;
-
-       if (soc->reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-               mtk_ethtool_ops.get_strings = mtk_get_strings;
-               mtk_ethtool_ops.get_sset_count = mtk_get_sset_count;
-               mtk_ethtool_ops.get_ethtool_stats = mtk_get_ethtool_stats;
-       }
-
-       netdev->ethtool_ops = &mtk_ethtool_ops;
-}
diff --git a/drivers/staging/mt7621-eth/ethtool.h b/drivers/staging/mt7621-eth/ethtool.h
deleted file mode 100644 (file)
index 0071469..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETHTOOL_H
-#define MTK_ETHTOOL_H
-
-#include <linux/ethtool.h>
-
-void mtk_set_ethtool_ops(struct net_device *netdev);
-
-#endif /* MTK_ETHTOOL_H */
diff --git a/drivers/staging/mt7621-eth/gsw_mt7620.h b/drivers/staging/mt7621-eth/gsw_mt7620.h
deleted file mode 100644 (file)
index 70f7e54..0000000
+++ /dev/null
@@ -1,277 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_GSW_MT7620_H__
-#define _RALINK_GSW_MT7620_H__
-
-#define GSW_REG_PHY_TIMEOUT    (5 * HZ)
-
-#define MT7620_GSW_REG_PIAC    0x0004
-
-#define GSW_NUM_VLANS          16
-#define GSW_NUM_VIDS           4096
-#define GSW_NUM_PORTS          7
-#define GSW_PORT6              6
-
-#define GSW_MDIO_ACCESS                BIT(31)
-#define GSW_MDIO_READ          BIT(19)
-#define GSW_MDIO_WRITE         BIT(18)
-#define GSW_MDIO_START         BIT(16)
-#define GSW_MDIO_ADDR_SHIFT    20
-#define GSW_MDIO_REG_SHIFT     25
-
-#define GSW_REG_PORT_PMCR(x)   (0x3000 + (x * 0x100))
-#define GSW_REG_PORT_STATUS(x) (0x3008 + (x * 0x100))
-#define GSW_REG_SMACCR0                0x3fE4
-#define GSW_REG_SMACCR1                0x3fE8
-#define GSW_REG_CKGCR          0x3ff0
-
-#define GSW_REG_IMR            0x7008
-#define GSW_REG_ISR            0x700c
-#define GSW_REG_GPC1           0x7014
-
-#define SYSC_REG_CHIP_REV_ID   0x0c
-#define SYSC_REG_CFG           0x10
-#define SYSC_REG_CFG1          0x14
-#define RST_CTRL_MCM           BIT(2)
-#define SYSC_PAD_RGMII2_MDIO   0x58
-#define SYSC_GPIO_MODE         0x60
-
-#define PORT_IRQ_ST_CHG                0x7f
-
-#define MT7621_ESW_PHY_POLLING 0x0000
-#define MT7620_ESW_PHY_POLLING 0x7000
-
-#define        PMCR_IPG                BIT(18)
-#define        PMCR_MAC_MODE           BIT(16)
-#define        PMCR_FORCE              BIT(15)
-#define        PMCR_TX_EN              BIT(14)
-#define        PMCR_RX_EN              BIT(13)
-#define        PMCR_BACKOFF            BIT(9)
-#define        PMCR_BACKPRES           BIT(8)
-#define        PMCR_RX_FC              BIT(5)
-#define        PMCR_TX_FC              BIT(4)
-#define        PMCR_SPEED(_x)          (_x << 2)
-#define        PMCR_DUPLEX             BIT(1)
-#define        PMCR_LINK               BIT(0)
-
-#define PHY_AN_EN              BIT(31)
-#define PHY_PRE_EN             BIT(30)
-#define PMY_MDC_CONF(_x)       ((_x & 0x3f) << 24)
-
-/* ethernet subsystem config register */
-#define ETHSYS_SYSCFG0         0x14
-/* ethernet subsystem clock register */
-#define ETHSYS_CLKCFG0         0x2c
-#define ETHSYS_TRGMII_CLK_SEL362_5     BIT(11)
-
-/* p5 RGMII wrapper TX clock control register */
-#define MT7530_P5RGMIITXCR     0x7b04
-/* p5 RGMII wrapper RX clock control register */
-#define MT7530_P5RGMIIRXCR     0x7b00
-/* TRGMII TDX ODT registers */
-#define MT7530_TRGMII_TD0_ODT  0x7a54
-#define MT7530_TRGMII_TD1_ODT  0x7a5c
-#define MT7530_TRGMII_TD2_ODT  0x7a64
-#define MT7530_TRGMII_TD3_ODT  0x7a6c
-#define MT7530_TRGMII_TD4_ODT  0x7a74
-#define MT7530_TRGMII_TD5_ODT  0x7a7c
-/* TRGMII TCK ctrl register */
-#define MT7530_TRGMII_TCK_CTRL 0x7a78
-/* TRGMII Tx ctrl register */
-#define MT7530_TRGMII_TXCTRL   0x7a40
-/* port 6 extended control register */
-#define MT7530_P6ECR            0x7830
-/* IO driver control register */
-#define MT7530_IO_DRV_CR       0x7810
-/* top signal control register */
-#define MT7530_TOP_SIG_CTRL    0x7808
-/* modified hwtrap register */
-#define MT7530_MHWTRAP         0x7804
-/* hwtrap status register */
-#define MT7530_HWTRAP          0x7800
-/* status interrupt register */
-#define MT7530_SYS_INT_STS     0x700c
-/* system interrupt register */
-#define MT7530_SYS_INT_EN      0x7008
-/* system control register */
-#define MT7530_SYS_CTRL                0x7000
-/* port MAC status register */
-#define MT7530_PMSR_P(x)       (0x3008 + (x * 0x100))
-/* port MAC control register */
-#define MT7530_PMCR_P(x)       (0x3000 + (x * 0x100))
-
-#define MT7621_XTAL_SHIFT      6
-#define MT7621_XTAL_MASK       0x7
-#define MT7621_XTAL_25         6
-#define MT7621_XTAL_40         3
-#define MT7621_MDIO_DRV_MASK   (3 << 4)
-#define MT7621_GE1_MODE_MASK   (3 << 12)
-
-#define TRGMII_TXCTRL_TXC_INV  BIT(30)
-#define P6ECR_INTF_MODE_RGMII  BIT(1)
-#define P5RGMIIRXCR_C_ALIGN    BIT(8)
-#define P5RGMIIRXCR_DELAY_2    BIT(1)
-#define P5RGMIITXCR_DELAY_2    (BIT(8) | BIT(2))
-
-/* TOP_SIG_CTRL bits */
-#define TOP_SIG_CTRL_NORMAL    (BIT(17) | BIT(16))
-
-/* MHWTRAP bits */
-#define MHWTRAP_MANUAL         BIT(16)
-#define MHWTRAP_P5_MAC_SEL     BIT(13)
-#define MHWTRAP_P6_DIS         BIT(8)
-#define MHWTRAP_P5_RGMII_MODE  BIT(7)
-#define MHWTRAP_P5_DIS         BIT(6)
-#define MHWTRAP_PHY_ACCESS     BIT(5)
-
-/* HWTRAP bits */
-#define HWTRAP_XTAL_SHIFT      9
-#define HWTRAP_XTAL_MASK       0x3
-
-/* SYS_CTRL bits */
-#define SYS_CTRL_SW_RST                BIT(1)
-#define SYS_CTRL_REG_RST       BIT(0)
-
-/* PMCR bits */
-#define PMCR_IFG_XMIT_96       BIT(18)
-#define PMCR_MAC_MODE          BIT(16)
-#define PMCR_FORCE_MODE                BIT(15)
-#define PMCR_TX_EN             BIT(14)
-#define PMCR_RX_EN             BIT(13)
-#define PMCR_BACK_PRES_EN      BIT(9)
-#define PMCR_BACKOFF_EN                BIT(8)
-#define PMCR_TX_FC_EN          BIT(5)
-#define PMCR_RX_FC_EN          BIT(4)
-#define PMCR_FORCE_SPEED_1000  BIT(3)
-#define PMCR_FORCE_FDX         BIT(1)
-#define PMCR_FORCE_LNK         BIT(0)
-#define PMCR_FIXED_LINK                (PMCR_IFG_XMIT_96 | PMCR_MAC_MODE | \
-                                PMCR_FORCE_MODE | PMCR_TX_EN | PMCR_RX_EN | \
-                                PMCR_BACK_PRES_EN | PMCR_BACKOFF_EN | \
-                                PMCR_FORCE_SPEED_1000 | PMCR_FORCE_FDX | \
-                                PMCR_FORCE_LNK)
-
-#define PMCR_FIXED_LINK_FC     (PMCR_FIXED_LINK | \
-                                PMCR_TX_FC_EN | PMCR_RX_FC_EN)
-
-/* TRGMII control registers */
-#define GSW_INTF_MODE          0x390
-#define GSW_TRGMII_TD0_ODT     0x354
-#define GSW_TRGMII_TD1_ODT     0x35c
-#define GSW_TRGMII_TD2_ODT     0x364
-#define GSW_TRGMII_TD3_ODT     0x36c
-#define GSW_TRGMII_TXCTL_ODT   0x374
-#define GSW_TRGMII_TCK_ODT     0x37c
-#define GSW_TRGMII_RCK_CTRL    0x300
-
-#define INTF_MODE_TRGMII       BIT(1)
-#define TRGMII_RCK_CTRL_RX_RST BIT(31)
-
-/* MAC control registers */
-#define MTK_MAC_P2_MCR         0x200
-#define MTK_MAC_P1_MCR         0x100
-
-#define MAC_MCR_MAX_RX_2K      BIT(29)
-#define MAC_MCR_IPG_CFG                (BIT(18) | BIT(16))
-#define MAC_MCR_FORCE_MODE     BIT(15)
-#define MAC_MCR_TX_EN          BIT(14)
-#define MAC_MCR_RX_EN          BIT(13)
-#define MAC_MCR_BACKOFF_EN     BIT(9)
-#define MAC_MCR_BACKPR_EN      BIT(8)
-#define MAC_MCR_FORCE_RX_FC    BIT(5)
-#define MAC_MCR_FORCE_TX_FC    BIT(4)
-#define MAC_MCR_SPEED_1000     BIT(3)
-#define MAC_MCR_FORCE_DPX      BIT(1)
-#define MAC_MCR_FORCE_LINK     BIT(0)
-#define MAC_MCR_FIXED_LINK     (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-                                MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN | \
-                                MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN | \
-                                MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_RX_FC | \
-                                MAC_MCR_FORCE_TX_FC | MAC_MCR_SPEED_1000 | \
-                                MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_LINK)
-#define MAC_MCR_FIXED_LINK_FC  (MAC_MCR_MAX_RX_2K | MAC_MCR_IPG_CFG | \
-                                MAC_MCR_FIXED_LINK)
-
-/* possible XTAL speed */
-#define        MT7623_XTAL_40          0
-#define MT7623_XTAL_20         1
-#define MT7623_XTAL_25         3
-
-/* GPIO port control registers */
-#define        GPIO_OD33_CTRL8         0x4c0
-#define        GPIO_BIAS_CTRL          0xed0
-#define GPIO_DRV_SEL10         0xf00
-
-/* on MT7620 the function of port 4 can be configured in software */
-enum {
-       PORT4_EPHY = 0,
-       PORT4_EXT,
-};
-
-/* struct mt7620_gsw - the structure that holds the SoC specific data
- * @dev:               The Device struct
- * @base:              The base address
- * @piac_offset:       The PIAC base may change depending on SoC
- * @irq:               The IRQ we are using
- * @port4:             The port4 mode on MT7620
- * @autopoll:          Is MDIO autopolling enabled
- * @ethsys:            The ethsys register map
- * @pctl:              The pin control register map
- * @clk_gsw:           The switch clock
- * @clk_gp1:           The gmac1 clock
- * @clk_gp2:           The gmac2 clock
- * @clk_trgpll:                The trgmii pll clock
- */
-struct mt7620_gsw {
-       struct device           *dev;
-       void __iomem            *base;
-       u32                     piac_offset;
-       int                     irq;
-       int                     port4;
-       unsigned long int       autopoll;
-
-       struct regmap           *ethsys;
-       struct regmap           *pctl;
-
-       struct clk              *clk_gsw;
-       struct clk              *clk_gp1;
-       struct clk              *clk_gp2;
-       struct clk              *clk_trgpll;
-};
-
-/* switch register I/O wrappers */
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg);
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg);
-
-/* the callback used by the driver core to bring up the switch */
-int mtk_gsw_init(struct mtk_eth *eth);
-
-/* MDIO access wrappers */
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val);
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg);
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port);
-int mt7620_has_carrier(struct mtk_eth *eth);
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-                            int speed, int duplex);
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val);
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg);
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg);
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-                     u32 phy_register, u32 write_data);
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg);
-void mt7620_handle_carrier(struct mtk_eth *eth);
-
-#endif
diff --git a/drivers/staging/mt7621-eth/gsw_mt7621.c b/drivers/staging/mt7621-eth/gsw_mt7621.c
deleted file mode 100644 (file)
index 53767b1..0000000
+++ /dev/null
@@ -1,297 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-
-#include <ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-
-void mtk_switch_w32(struct mt7620_gsw *gsw, u32 val, unsigned int reg)
-{
-       iowrite32(val, gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_w32);
-
-u32 mtk_switch_r32(struct mt7620_gsw *gsw, unsigned int reg)
-{
-       return ioread32(gsw->base + reg);
-}
-EXPORT_SYMBOL_GPL(mtk_switch_r32);
-
-static irqreturn_t gsw_interrupt_mt7621(int irq, void *_eth)
-{
-       struct mtk_eth *eth = (struct mtk_eth *)_eth;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-       u32 reg, i;
-
-       reg = mt7530_mdio_r32(gsw, MT7530_SYS_INT_STS);
-
-       for (i = 0; i < 5; i++) {
-               unsigned int link;
-
-               if ((reg & BIT(i)) == 0)
-                       continue;
-
-               link = mt7530_mdio_r32(gsw, MT7530_PMSR_P(i)) & 0x1;
-
-               if (link == eth->link[i])
-                       continue;
-
-               eth->link[i] = link;
-               if (link)
-                       netdev_info(*eth->netdev,
-                                   "port %d link up\n", i);
-               else
-                       netdev_info(*eth->netdev,
-                                   "port %d link down\n", i);
-       }
-
-       mt7530_mdio_w32(gsw, MT7530_SYS_INT_STS, 0x1f);
-
-       return IRQ_HANDLED;
-}
-
-static void mt7621_hw_init(struct mtk_eth *eth, struct mt7620_gsw *gsw,
-                          struct device_node *np)
-{
-       u32 i;
-       u32 val;
-
-       /* hardware reset the switch */
-       mtk_reset(eth, RST_CTRL_MCM);
-       mdelay(10);
-
-       /* reduce RGMII2 PAD driving strength */
-       rt_sysc_m32(MT7621_MDIO_DRV_MASK, 0, SYSC_PAD_RGMII2_MDIO);
-
-       /* gpio mux - RGMII1=Normal mode */
-       rt_sysc_m32(BIT(14), 0, SYSC_GPIO_MODE);
-
-       /* set GMAC1 RGMII mode */
-       rt_sysc_m32(MT7621_GE1_MODE_MASK, 0, SYSC_REG_CFG1);
-
-       /* enable MDIO to control MT7530 */
-       rt_sysc_m32(3 << 12, 0, SYSC_GPIO_MODE);
-
-       /* turn off all PHYs */
-       for (i = 0; i <= 4; i++) {
-               val = _mt7620_mii_read(gsw, i, 0x0);
-               val |= BIT(11);
-               _mt7620_mii_write(gsw, i, 0x0, val);
-       }
-
-       /* reset the switch */
-       mt7530_mdio_w32(gsw, MT7530_SYS_CTRL,
-                       SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
-       usleep_range(10, 20);
-
-       if ((rt_sysc_r32(SYSC_REG_CHIP_REV_ID) & 0xFFFF) == 0x0101) {
-               /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-               mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK, MTK_MAC_P2_MCR);
-               mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK);
-       } else {
-               /* GE1, Force 1000M/FD, FC ON, MAX_RX_LENGTH 1536 */
-               mtk_switch_w32(gsw, MAC_MCR_FIXED_LINK_FC, MTK_MAC_P1_MCR);
-               mt7530_mdio_w32(gsw, MT7530_PMCR_P(6), PMCR_FIXED_LINK_FC);
-       }
-
-       /* GE2, Link down */
-       mtk_switch_w32(gsw, MAC_MCR_FORCE_MODE, MTK_MAC_P2_MCR);
-
-       /* Enable Port 6, P5 as GMAC5, P5 disable */
-       val = mt7530_mdio_r32(gsw, MT7530_MHWTRAP);
-       /* Enable Port 6 */
-       val &= ~MHWTRAP_P6_DIS;
-       /* Disable Port 5 */
-       val |= MHWTRAP_P5_DIS;
-       /* manual override of HW-Trap */
-       val |= MHWTRAP_MANUAL;
-       mt7530_mdio_w32(gsw, MT7530_MHWTRAP, val);
-
-       val = rt_sysc_r32(SYSC_REG_CFG);
-       val = (val >> MT7621_XTAL_SHIFT) & MT7621_XTAL_MASK;
-       if (val < MT7621_XTAL_25 && val >= MT7621_XTAL_40) {
-               /* 40MHz */
-
-               /* disable MT7530 core clock */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x410);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x0);
-
-               /* disable MT7530 PLL */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40d);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x2020);
-
-               /* for MT7530 core clock = 500MHz */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40e);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x119);
-
-               /* enable MT7530 PLL */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x40d);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-               _mt7620_mii_write(gsw, 0, 14, 0x2820);
-
-               usleep_range(20, 40);
-
-               /* enable MT7530 core clock */
-               _mt7620_mii_write(gsw, 0, 13, 0x1f);
-               _mt7620_mii_write(gsw, 0, 14, 0x410);
-               _mt7620_mii_write(gsw, 0, 13, 0x401f);
-       }
-
-       /* RGMII */
-       _mt7620_mii_write(gsw, 0, 14, 0x1);
-
-       /* set MT7530 central align */
-       mt7530_mdio_m32(gsw, BIT(0), P6ECR_INTF_MODE_RGMII, MT7530_P6ECR);
-       mt7530_mdio_m32(gsw, TRGMII_TXCTRL_TXC_INV, 0,
-                       MT7530_TRGMII_TXCTRL);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TCK_CTRL, 0x855);
-
-       /* delay setting for 10/1000M */
-       mt7530_mdio_w32(gsw, MT7530_P5RGMIIRXCR,
-                       P5RGMIIRXCR_C_ALIGN | P5RGMIIRXCR_DELAY_2);
-       mt7530_mdio_w32(gsw, MT7530_P5RGMIITXCR, 0x14);
-
-       /* lower Tx driving */
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD0_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD1_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD2_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD3_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD4_ODT, 0x44);
-       mt7530_mdio_w32(gsw, MT7530_TRGMII_TD5_ODT, 0x44);
-
-       /* turn on all PHYs */
-       for (i = 0; i <= 4; i++) {
-               val = _mt7620_mii_read(gsw, i, 0);
-               val &= ~BIT(11);
-               _mt7620_mii_write(gsw, i, 0, val);
-       }
-
-#define MT7530_NUM_PORTS 8
-#define REG_ESW_PORT_PCR(x)    (0x2004 | ((x) << 8))
-#define REG_ESW_PORT_PVC(x)    (0x2010 | ((x) << 8))
-#define REG_ESW_PORT_PPBV1(x)  (0x2014 | ((x) << 8))
-#define MT7530_CPU_PORT                6
-
-       /* This is copied from mt7530_apply_config in libreCMC driver */
-       {
-               int i;
-
-               for (i = 0; i < MT7530_NUM_PORTS; i++)
-                       mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(i), 0x00400000);
-
-               mt7530_mdio_w32(gsw, REG_ESW_PORT_PCR(MT7530_CPU_PORT),
-                               0x00ff0000);
-
-               for (i = 0; i < MT7530_NUM_PORTS; i++)
-                       mt7530_mdio_w32(gsw, REG_ESW_PORT_PVC(i), 0x810000c0);
-       }
-
-       /* enable irq */
-       mt7530_mdio_m32(gsw, 0, 3 << 16, MT7530_TOP_SIG_CTRL);
-       mt7530_mdio_w32(gsw, MT7530_SYS_INT_EN, 0x1f);
-}
-
-static const struct of_device_id mediatek_gsw_match[] = {
-       { .compatible = "mediatek,mt7621-gsw" },
-       {},
-};
-MODULE_DEVICE_TABLE(of, mediatek_gsw_match);
-
-int mtk_gsw_init(struct mtk_eth *eth)
-{
-       struct device_node *np = eth->switch_np;
-       struct platform_device *pdev = of_find_device_by_node(np);
-       struct mt7620_gsw *gsw;
-
-       if (!pdev)
-               return -ENODEV;
-
-       if (!of_device_is_compatible(np, mediatek_gsw_match->compatible))
-               return -EINVAL;
-
-       gsw = platform_get_drvdata(pdev);
-       eth->sw_priv = gsw;
-
-       if (!gsw->irq)
-               return -EINVAL;
-
-       request_irq(gsw->irq, gsw_interrupt_mt7621, 0,
-                   "gsw", eth);
-       disable_irq(gsw->irq);
-
-       mt7621_hw_init(eth, gsw, np);
-
-       enable_irq(gsw->irq);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(mtk_gsw_init);
-
-static int mt7621_gsw_probe(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       struct mt7620_gsw *gsw;
-
-       gsw = devm_kzalloc(&pdev->dev, sizeof(struct mt7620_gsw), GFP_KERNEL);
-       if (!gsw)
-               return -ENOMEM;
-
-       gsw->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(gsw->base))
-               return PTR_ERR(gsw->base);
-
-       gsw->dev = &pdev->dev;
-       gsw->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-
-       platform_set_drvdata(pdev, gsw);
-
-       return 0;
-}
-
-static int mt7621_gsw_remove(struct platform_device *pdev)
-{
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static struct platform_driver gsw_driver = {
-       .probe = mt7621_gsw_probe,
-       .remove = mt7621_gsw_remove,
-       .driver = {
-               .name = "mt7621-gsw",
-               .of_match_table = mediatek_gsw_match,
-       },
-};
-
-module_platform_driver(gsw_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("GBit switch driver for Mediatek MT7621 SoC");
diff --git a/drivers/staging/mt7621-eth/mdio.c b/drivers/staging/mt7621-eth/mdio.c
deleted file mode 100644 (file)
index 5fea6a4..0000000
+++ /dev/null
@@ -1,275 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/phy.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-
-static int mtk_mdio_reset(struct mii_bus *bus)
-{
-       /* TODO */
-       return 0;
-}
-
-static void mtk_phy_link_adjust(struct net_device *dev)
-{
-       struct mtk_eth *eth = netdev_priv(dev);
-       unsigned long flags;
-       int i;
-
-       spin_lock_irqsave(&eth->phy->lock, flags);
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_node[i]) {
-                       struct phy_device *phydev = eth->phy->phy[i];
-                       int status_change = 0;
-
-                       if (phydev->link)
-                               if (eth->phy->duplex[i] != phydev->duplex ||
-                                   eth->phy->speed[i] != phydev->speed)
-                                       status_change = 1;
-
-                       if (phydev->link != eth->link[i])
-                               status_change = 1;
-
-                       switch (phydev->speed) {
-                       case SPEED_1000:
-                       case SPEED_100:
-                       case SPEED_10:
-                               eth->link[i] = phydev->link;
-                               eth->phy->duplex[i] = phydev->duplex;
-                               eth->phy->speed[i] = phydev->speed;
-
-                               if (status_change &&
-                                   eth->soc->mdio_adjust_link)
-                                       eth->soc->mdio_adjust_link(eth, i);
-                               break;
-                       }
-               }
-       }
-       spin_unlock_irqrestore(&eth->phy->lock, flags);
-}
-
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-                        struct device_node *phy_node)
-{
-       const __be32 *_port = NULL;
-       struct phy_device *phydev;
-       int phy_mode, port;
-
-       _port = of_get_property(phy_node, "reg", NULL);
-
-       if (!_port || (be32_to_cpu(*_port) >= 0x20)) {
-               pr_err("%pOFn: invalid port id\n", phy_node);
-               return -EINVAL;
-       }
-       port = be32_to_cpu(*_port);
-       phy_mode = of_get_phy_mode(phy_node);
-       if (phy_mode < 0) {
-               dev_err(eth->dev, "incorrect phy-mode %d\n", phy_mode);
-               eth->phy->phy_node[port] = NULL;
-               return -EINVAL;
-       }
-
-       phydev = of_phy_connect(eth->netdev[mac->id], phy_node,
-                               mtk_phy_link_adjust, 0, phy_mode);
-       if (!phydev) {
-               dev_err(eth->dev, "could not connect to PHY\n");
-               eth->phy->phy_node[port] = NULL;
-               return -ENODEV;
-       }
-
-       phydev->supported &= PHY_1000BT_FEATURES;
-       phydev->advertising = phydev->supported;
-
-       dev_info(eth->dev,
-                "connected port %d to PHY at %s [uid=%08x, driver=%s]\n",
-                port, phydev_name(phydev), phydev->phy_id,
-                phydev->drv->name);
-
-       eth->phy->phy[port] = phydev;
-       eth->link[port] = 0;
-
-       return 0;
-}
-
-static void phy_init(struct mtk_eth *eth, struct mtk_mac *mac,
-                    struct phy_device *phy)
-{
-       phy_attach(eth->netdev[mac->id], phydev_name(phy),
-                  PHY_INTERFACE_MODE_MII);
-
-       phy->autoneg = AUTONEG_ENABLE;
-       phy->speed = 0;
-       phy->duplex = 0;
-       phy_set_max_speed(phy, SPEED_100);
-       phy->advertising = phy->supported | ADVERTISED_Autoneg;
-
-       phy_start_aneg(phy);
-}
-
-static int mtk_phy_connect(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       int i;
-
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_node[i]) {
-                       if (!mac->phy_dev) {
-                               mac->phy_dev = eth->phy->phy[i];
-                               mac->phy_flags = MTK_PHY_FLAG_PORT;
-                       }
-               } else if (eth->mii_bus) {
-                       struct phy_device *phy;
-
-                       phy = mdiobus_get_phy(eth->mii_bus, i);
-                       if (phy) {
-                               phy_init(eth, mac, phy);
-                               if (!mac->phy_dev) {
-                                       mac->phy_dev = phy;
-                                       mac->phy_flags = MTK_PHY_FLAG_ATTACH;
-                               }
-                       }
-               }
-       }
-
-       return 0;
-}
-
-static void mtk_phy_disconnect(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++)
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 0;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_disconnect(eth->phy->phy[i]);
-               } else if (eth->mii_bus) {
-                       struct phy_device *phy =
-                               mdiobus_get_phy(eth->mii_bus, i);
-
-                       if (phy)
-                               phy_detach(phy);
-               }
-}
-
-static void mtk_phy_start(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++) {
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 1;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_start(eth->phy->phy[i]);
-               }
-       }
-}
-
-static void mtk_phy_stop(struct mtk_mac *mac)
-{
-       struct mtk_eth *eth = mac->hw;
-       unsigned long flags;
-       int i;
-
-       for (i = 0; i < 8; i++)
-               if (eth->phy->phy_fixed[i]) {
-                       spin_lock_irqsave(&eth->phy->lock, flags);
-                       eth->link[i] = 0;
-                       if (eth->soc->mdio_adjust_link)
-                               eth->soc->mdio_adjust_link(eth, i);
-                       spin_unlock_irqrestore(&eth->phy->lock, flags);
-               } else if (eth->phy->phy[i]) {
-                       phy_stop(eth->phy->phy[i]);
-               }
-}
-
-static struct mtk_phy phy_ralink = {
-       .connect = mtk_phy_connect,
-       .disconnect = mtk_phy_disconnect,
-       .start = mtk_phy_start,
-       .stop = mtk_phy_stop,
-};
-
-int mtk_mdio_init(struct mtk_eth *eth)
-{
-       struct device_node *mii_np;
-       int err;
-
-       if (!eth->soc->mdio_read || !eth->soc->mdio_write)
-               return 0;
-
-       spin_lock_init(&phy_ralink.lock);
-       eth->phy = &phy_ralink;
-
-       mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
-       if (!mii_np) {
-               dev_err(eth->dev, "no %s child node found", "mdio-bus");
-               return -ENODEV;
-       }
-
-       if (!of_device_is_available(mii_np)) {
-               err = 0;
-               goto err_put_node;
-       }
-
-       eth->mii_bus = mdiobus_alloc();
-       if (!eth->mii_bus) {
-               err = -ENOMEM;
-               goto err_put_node;
-       }
-
-       eth->mii_bus->name = "mdio";
-       eth->mii_bus->read = eth->soc->mdio_read;
-       eth->mii_bus->write = eth->soc->mdio_write;
-       eth->mii_bus->reset = mtk_mdio_reset;
-       eth->mii_bus->priv = eth;
-       eth->mii_bus->parent = eth->dev;
-
-       snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
-       err = of_mdiobus_register(eth->mii_bus, mii_np);
-       if (err)
-               goto err_free_bus;
-
-       return 0;
-
-err_free_bus:
-       kfree(eth->mii_bus);
-err_put_node:
-       of_node_put(mii_np);
-       eth->mii_bus = NULL;
-       return err;
-}
-
-void mtk_mdio_cleanup(struct mtk_eth *eth)
-{
-       if (!eth->mii_bus)
-               return;
-
-       mdiobus_unregister(eth->mii_bus);
-       of_node_put(eth->mii_bus->dev.of_node);
-       kfree(eth->mii_bus);
-}
diff --git a/drivers/staging/mt7621-eth/mdio.h b/drivers/staging/mt7621-eth/mdio.h
deleted file mode 100644 (file)
index b14e238..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef _RALINK_MDIO_H__
-#define _RALINK_MDIO_H__
-
-#ifdef CONFIG_NET_MEDIATEK_MDIO
-int mtk_mdio_init(struct mtk_eth *eth);
-void mtk_mdio_cleanup(struct mtk_eth *eth);
-int mtk_connect_phy_node(struct mtk_eth *eth, struct mtk_mac *mac,
-                        struct device_node *phy_node);
-#else
-static inline int mtk_mdio_init(struct mtk_eth *eth) { return 0; }
-static inline void mtk_mdio_cleanup(struct mtk_eth *eth) {}
-#endif
-#endif
diff --git a/drivers/staging/mt7621-eth/mdio_mt7620.c b/drivers/staging/mt7621-eth/mdio_mt7620.c
deleted file mode 100644 (file)
index ced605c..0000000
+++ /dev/null
@@ -1,173 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-static int mt7620_mii_busy_wait(struct mt7620_gsw *gsw)
-{
-       unsigned long t_start = jiffies;
-
-       while (1) {
-               if (!(mtk_switch_r32(gsw,
-                                    gsw->piac_offset + MT7620_GSW_REG_PIAC) &
-                                    GSW_MDIO_ACCESS))
-                       return 0;
-               if (time_after(jiffies, t_start + GSW_REG_PHY_TIMEOUT))
-                       break;
-       }
-
-       dev_err(gsw->dev, "mdio: MDIO timeout\n");
-       return -1;
-}
-
-u32 _mt7620_mii_write(struct mt7620_gsw *gsw, u32 phy_addr,
-                     u32 phy_register, u32 write_data)
-{
-       if (mt7620_mii_busy_wait(gsw))
-               return -1;
-
-       write_data &= 0xffff;
-
-       mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_WRITE |
-               (phy_register << GSW_MDIO_REG_SHIFT) |
-               (phy_addr << GSW_MDIO_ADDR_SHIFT) | write_data,
-               MT7620_GSW_REG_PIAC);
-
-       if (mt7620_mii_busy_wait(gsw))
-               return -1;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_write);
-
-u32 _mt7620_mii_read(struct mt7620_gsw *gsw, int phy_addr, int phy_reg)
-{
-       u32 d;
-
-       if (mt7620_mii_busy_wait(gsw))
-               return 0xffff;
-
-       mtk_switch_w32(gsw, GSW_MDIO_ACCESS | GSW_MDIO_START | GSW_MDIO_READ |
-               (phy_reg << GSW_MDIO_REG_SHIFT) |
-               (phy_addr << GSW_MDIO_ADDR_SHIFT),
-               MT7620_GSW_REG_PIAC);
-
-       if (mt7620_mii_busy_wait(gsw))
-               return 0xffff;
-
-       d = mtk_switch_r32(gsw, MT7620_GSW_REG_PIAC) & 0xffff;
-
-       return d;
-}
-EXPORT_SYMBOL_GPL(_mt7620_mii_read);
-
-int mt7620_mdio_write(struct mii_bus *bus, int phy_addr, int phy_reg, u16 val)
-{
-       struct mtk_eth *eth = bus->priv;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-       return _mt7620_mii_write(gsw, phy_addr, phy_reg, val);
-}
-
-int mt7620_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
-{
-       struct mtk_eth *eth = bus->priv;
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-
-       return _mt7620_mii_read(gsw, phy_addr, phy_reg);
-}
-
-void mt7530_mdio_w32(struct mt7620_gsw *gsw, u32 reg, u32 val)
-{
-       _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-       _mt7620_mii_write(gsw, 0x1f, (reg >> 2) & 0xf,  val & 0xffff);
-       _mt7620_mii_write(gsw, 0x1f, 0x10, val >> 16);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_w32);
-
-u32 mt7530_mdio_r32(struct mt7620_gsw *gsw, u32 reg)
-{
-       u16 high, low;
-
-       _mt7620_mii_write(gsw, 0x1f, 0x1f, (reg >> 6) & 0x3ff);
-       low = _mt7620_mii_read(gsw, 0x1f, (reg >> 2) & 0xf);
-       high = _mt7620_mii_read(gsw, 0x1f, 0x10);
-
-       return (high << 16) | (low & 0xffff);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_r32);
-
-void mt7530_mdio_m32(struct mt7620_gsw *gsw, u32 mask, u32 set, u32 reg)
-{
-       u32 val = mt7530_mdio_r32(gsw, reg);
-
-       val &= ~mask;
-       val |= set;
-       mt7530_mdio_w32(gsw, reg, val);
-}
-EXPORT_SYMBOL_GPL(mt7530_mdio_m32);
-
-static unsigned char *mtk_speed_str(int speed)
-{
-       switch (speed) {
-       case 2:
-       case SPEED_1000:
-               return "1000";
-       case 1:
-       case SPEED_100:
-               return "100";
-       case 0:
-       case SPEED_10:
-               return "10";
-       }
-
-       return "? ";
-}
-
-int mt7620_has_carrier(struct mtk_eth *eth)
-{
-       struct mt7620_gsw *gsw = (struct mt7620_gsw *)eth->sw_priv;
-       int i;
-
-       for (i = 0; i < GSW_PORT6; i++)
-               if (mt7530_mdio_r32(gsw, GSW_REG_PORT_STATUS(i)) & 0x1)
-                       return 1;
-       return 0;
-}
-
-void mt7620_print_link_state(struct mtk_eth *eth, int port, int link,
-                            int speed, int duplex)
-{
-       struct mt7620_gsw *gsw = eth->sw_priv;
-
-       if (link)
-               dev_info(gsw->dev, "port %d link up (%sMbps/%s duplex)\n",
-                        port, mtk_speed_str(speed),
-                        (duplex) ? "Full" : "Half");
-       else
-               dev_info(gsw->dev, "port %d link down\n", port);
-}
-
-void mt7620_mdio_link_adjust(struct mtk_eth *eth, int port)
-{
-       mt7620_print_link_state(eth, port, eth->link[port],
-                               eth->phy->speed[port],
-                               (eth->phy->duplex[port] == DUPLEX_FULL));
-}
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.c b/drivers/staging/mt7621-eth/mtk_eth_soc.c
deleted file mode 100644 (file)
index 6027b19..0000000
+++ /dev/null
@@ -1,2176 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/mfd/syscon.h>
-#include <linux/clk.h>
-#include <linux/of_net.h>
-#include <linux/of_mdio.h>
-#include <linux/if_vlan.h>
-#include <linux/reset.h>
-#include <linux/tcp.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/regmap.h>
-
-#include "mtk_eth_soc.h"
-#include "mdio.h"
-#include "ethtool.h"
-
-#define        MAX_RX_LENGTH           1536
-#define MTK_RX_ETH_HLEN                (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
-#define MTK_RX_HLEN            (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
-#define DMA_DUMMY_DESC         0xffffffff
-#define MTK_DEFAULT_MSG_ENABLE \
-               (NETIF_MSG_DRV | \
-               NETIF_MSG_PROBE | \
-               NETIF_MSG_LINK | \
-               NETIF_MSG_TIMER | \
-               NETIF_MSG_IFDOWN | \
-               NETIF_MSG_IFUP | \
-               NETIF_MSG_RX_ERR | \
-               NETIF_MSG_TX_ERR)
-
-#define TX_DMA_DESP2_DEF       (TX_DMA_LS0 | TX_DMA_DONE)
-#define NEXT_TX_DESP_IDX(X)    (((X) + 1) & (ring->tx_ring_size - 1))
-#define NEXT_RX_DESP_IDX(X)    (((X) + 1) & (ring->rx_ring_size - 1))
-
-#define SYSC_REG_RSTCTRL       0x34
-
-static int mtk_msg_level = -1;
-module_param_named(msg_level, mtk_msg_level, int, 0);
-MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
-
-static const u16 mtk_reg_table_default[MTK_REG_COUNT] = {
-       [MTK_REG_PDMA_GLO_CFG] = MTK_PDMA_GLO_CFG,
-       [MTK_REG_PDMA_RST_CFG] = MTK_PDMA_RST_CFG,
-       [MTK_REG_DLY_INT_CFG] = MTK_DLY_INT_CFG,
-       [MTK_REG_TX_BASE_PTR0] = MTK_TX_BASE_PTR0,
-       [MTK_REG_TX_MAX_CNT0] = MTK_TX_MAX_CNT0,
-       [MTK_REG_TX_CTX_IDX0] = MTK_TX_CTX_IDX0,
-       [MTK_REG_TX_DTX_IDX0] = MTK_TX_DTX_IDX0,
-       [MTK_REG_RX_BASE_PTR0] = MTK_RX_BASE_PTR0,
-       [MTK_REG_RX_MAX_CNT0] = MTK_RX_MAX_CNT0,
-       [MTK_REG_RX_CALC_IDX0] = MTK_RX_CALC_IDX0,
-       [MTK_REG_RX_DRX_IDX0] = MTK_RX_DRX_IDX0,
-       [MTK_REG_MTK_INT_ENABLE] = MTK_INT_ENABLE,
-       [MTK_REG_MTK_INT_STATUS] = MTK_INT_STATUS,
-       [MTK_REG_MTK_DMA_VID_BASE] = MTK_DMA_VID0,
-       [MTK_REG_MTK_COUNTER_BASE] = MTK_GDMA1_TX_GBCNT,
-       [MTK_REG_MTK_RST_GL] = MTK_RST_GL,
-};
-
-static const u16 *mtk_reg_table = mtk_reg_table_default;
-
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg)
-{
-       __raw_writel(val, eth->base + reg);
-}
-
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg)
-{
-       return __raw_readl(eth->base + reg);
-}
-
-static void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
-{
-       mtk_w32(eth, val, mtk_reg_table[reg]);
-}
-
-static u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
-{
-       return mtk_r32(eth, mtk_reg_table[reg]);
-}
-
-/* these bits are also exposed via the reset-controller API. However, the switch
- * and FE need to be brought out of reset at exactly the same moment, and the
- * reset-controller API does not provide this feature yet. Do the reset manually
- * until we fix the reset-controller API to be able to do this.
- */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits)
-{
-       u32 val;
-
-       regmap_read(eth->ethsys, SYSC_REG_RSTCTRL, &val);
-       val |= reset_bits;
-       regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-       usleep_range(10, 20);
-       val &= ~reset_bits;
-       regmap_write(eth->ethsys, SYSC_REG_RSTCTRL, val);
-       usleep_range(10, 20);
-}
-EXPORT_SYMBOL(mtk_reset);
-
-static inline void mtk_irq_ack(struct mtk_eth *eth, u32 mask)
-{
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_reg_w32(eth, mask, MTK_REG_MTK_INT_STATUS);
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, mask, MTK_QMTK_INT_STATUS);
-}
-
-static inline u32 mtk_irq_pending(struct mtk_eth *eth)
-{
-       u32 status = 0;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               status |= mtk_reg_r32(eth, MTK_REG_MTK_INT_STATUS);
-       if (eth->soc->dma_type & MTK_QDMA)
-               status |= mtk_r32(eth, MTK_QMTK_INT_STATUS);
-
-       return status;
-}
-
-static void mtk_irq_ack_status(struct mtk_eth *eth, u32 mask)
-{
-       u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-       if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-               status_reg = MTK_REG_MTK_INT_STATUS2;
-
-       mtk_reg_w32(eth, mask, status_reg);
-}
-
-static u32 mtk_irq_pending_status(struct mtk_eth *eth)
-{
-       u32 status_reg = MTK_REG_MTK_INT_STATUS;
-
-       if (mtk_reg_table[MTK_REG_MTK_INT_STATUS2])
-               status_reg = MTK_REG_MTK_INT_STATUS2;
-
-       return mtk_reg_r32(eth, status_reg);
-}
-
-static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
-{
-       u32 val;
-
-       if (eth->soc->dma_type & MTK_PDMA) {
-               val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-               mtk_reg_w32(eth, val & ~mask, MTK_REG_MTK_INT_ENABLE);
-               /* flush write */
-               mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-               mtk_w32(eth, val & ~mask, MTK_QMTK_INT_ENABLE);
-               /* flush write */
-               mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-       }
-}
-
-static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
-{
-       u32 val;
-
-       if (eth->soc->dma_type & MTK_PDMA) {
-               val = mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-               mtk_reg_w32(eth, val | mask, MTK_REG_MTK_INT_ENABLE);
-               /* flush write */
-               mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               val = mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-               mtk_w32(eth, val | mask, MTK_QMTK_INT_ENABLE);
-               /* flush write */
-               mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-       }
-}
-
-static inline u32 mtk_irq_enabled(struct mtk_eth *eth)
-{
-       u32 enabled = 0;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               enabled |= mtk_reg_r32(eth, MTK_REG_MTK_INT_ENABLE);
-       if (eth->soc->dma_type & MTK_QDMA)
-               enabled |= mtk_r32(eth, MTK_QMTK_INT_ENABLE);
-
-       return enabled;
-}
-
-static inline void mtk_hw_set_macaddr(struct mtk_mac *mac,
-                                     unsigned char *macaddr)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&mac->hw->page_lock, flags);
-       mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], MTK_GDMA1_MAC_ADRH);
-       mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
-               (macaddr[4] << 8) | macaddr[5],
-               MTK_GDMA1_MAC_ADRL);
-       spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static int mtk_set_mac_address(struct net_device *dev, void *p)
-{
-       int ret = eth_mac_addr(dev, p);
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       if (ret)
-               return ret;
-
-       if (eth->soc->set_mac)
-               eth->soc->set_mac(mac, dev->dev_addr);
-       else
-               mtk_hw_set_macaddr(mac, p);
-
-       return 0;
-}
-
-static inline int mtk_max_frag_size(int mtu)
-{
-       /* make sure buf_size will be at least MAX_RX_LENGTH */
-       if (mtu + MTK_RX_ETH_HLEN < MAX_RX_LENGTH)
-               mtu = MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
-
-       return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
-               SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-}
-
-static inline int mtk_max_buf_size(int frag_size)
-{
-       int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
-                      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
-
-       WARN_ON(buf_size < MAX_RX_LENGTH);
-
-       return buf_size;
-}
-
-static inline void mtk_get_rxd(struct mtk_rx_dma *rxd,
-                              struct mtk_rx_dma *dma_rxd)
-{
-       rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
-       rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
-       rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
-       rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-}
-
-static inline void mtk_set_txd_pdma(struct mtk_tx_dma *txd,
-                                   struct mtk_tx_dma *dma_txd)
-{
-       WRITE_ONCE(dma_txd->txd1, txd->txd1);
-       WRITE_ONCE(dma_txd->txd3, txd->txd3);
-       WRITE_ONCE(dma_txd->txd4, txd->txd4);
-       /* clean dma done flag last */
-       WRITE_ONCE(dma_txd->txd2, txd->txd2);
-}
-
-static void mtk_clean_rx(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-       int i;
-
-       if (ring->rx_data && ring->rx_dma) {
-               for (i = 0; i < ring->rx_ring_size; i++) {
-                       if (!ring->rx_data[i])
-                               continue;
-                       if (!ring->rx_dma[i].rxd1)
-                               continue;
-                       dma_unmap_single(eth->dev,
-                                        ring->rx_dma[i].rxd1,
-                                        ring->rx_buf_size,
-                                        DMA_FROM_DEVICE);
-                       skb_free_frag(ring->rx_data[i]);
-               }
-               kfree(ring->rx_data);
-               ring->rx_data = NULL;
-       }
-
-       if (ring->rx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->rx_ring_size * sizeof(*ring->rx_dma),
-                                 ring->rx_dma,
-                                 ring->rx_phys);
-               ring->rx_dma = NULL;
-       }
-}
-
-static int mtk_dma_rx_alloc(struct mtk_eth *eth, struct mtk_rx_ring *ring)
-{
-       int i, pad = 0;
-
-       ring->frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-       ring->rx_buf_size = mtk_max_buf_size(ring->frag_size);
-       ring->rx_ring_size = eth->soc->dma_ring_size;
-       ring->rx_data = kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data),
-                               GFP_KERNEL);
-       if (!ring->rx_data)
-               goto no_rx_mem;
-
-       for (i = 0; i < ring->rx_ring_size; i++) {
-               ring->rx_data[i] = netdev_alloc_frag(ring->frag_size);
-               if (!ring->rx_data[i])
-                       goto no_rx_mem;
-       }
-
-       ring->rx_dma =
-               dma_alloc_coherent(eth->dev,
-                                  ring->rx_ring_size * sizeof(*ring->rx_dma),
-                                  &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->rx_dma)
-               goto no_rx_mem;
-
-       if (!eth->soc->rx_2b_offset)
-               pad = NET_IP_ALIGN;
-
-       for (i = 0; i < ring->rx_ring_size; i++) {
-               dma_addr_t dma_addr = dma_map_single(eth->dev,
-                               ring->rx_data[i] + NET_SKB_PAD + pad,
-                               ring->rx_buf_size,
-                               DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-                       goto no_rx_mem;
-               ring->rx_dma[i].rxd1 = (unsigned int)dma_addr;
-
-               if (eth->soc->rx_sg_dma)
-                       ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-               else
-                       ring->rx_dma[i].rxd2 = RX_DMA_LSO;
-       }
-       ring->rx_calc_idx = ring->rx_ring_size - 1;
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       return 0;
-
-no_rx_mem:
-       return -ENOMEM;
-}
-
-static void mtk_txd_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
-{
-       if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
-               dma_unmap_single(dev,
-                                dma_unmap_addr(tx_buf, dma_addr0),
-                                dma_unmap_len(tx_buf, dma_len0),
-                                DMA_TO_DEVICE);
-       } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
-               dma_unmap_page(dev,
-                              dma_unmap_addr(tx_buf, dma_addr0),
-                              dma_unmap_len(tx_buf, dma_len0),
-                              DMA_TO_DEVICE);
-       }
-       if (tx_buf->flags & MTK_TX_FLAGS_PAGE1)
-               dma_unmap_page(dev,
-                              dma_unmap_addr(tx_buf, dma_addr1),
-                              dma_unmap_len(tx_buf, dma_len1),
-                              DMA_TO_DEVICE);
-
-       tx_buf->flags = 0;
-       if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC))
-               dev_kfree_skb_any(tx_buf->skb);
-       tx_buf->skb = NULL;
-}
-
-static void mtk_pdma_tx_clean(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i;
-
-       if (ring->tx_buf) {
-               for (i = 0; i < ring->tx_ring_size; i++)
-                       mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-               kfree(ring->tx_buf);
-               ring->tx_buf = NULL;
-       }
-
-       if (ring->tx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                 ring->tx_dma,
-                                 ring->tx_phys);
-               ring->tx_dma = NULL;
-       }
-}
-
-static void mtk_qdma_tx_clean(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i;
-
-       if (ring->tx_buf) {
-               for (i = 0; i < ring->tx_ring_size; i++)
-                       mtk_txd_unmap(eth->dev, &ring->tx_buf[i]);
-               kfree(ring->tx_buf);
-               ring->tx_buf = NULL;
-       }
-
-       if (ring->tx_dma) {
-               dma_free_coherent(eth->dev,
-                                 ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                 ring->tx_dma,
-                                 ring->tx_phys);
-               ring->tx_dma = NULL;
-       }
-}
-
-void mtk_stats_update_mac(struct mtk_mac *mac)
-{
-       struct mtk_hw_stats *hw_stats = mac->hw_stats;
-       unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-       u64 stats;
-
-       base += hw_stats->reg_offset;
-
-       u64_stats_update_begin(&hw_stats->syncp);
-
-       if (mac->hw->soc->new_stats) {
-               hw_stats->rx_bytes += mtk_r32(mac->hw, base);
-               stats =  mtk_r32(mac->hw, base + 0x04);
-               if (stats)
-                       hw_stats->rx_bytes += (stats << 32);
-               hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x08);
-               hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x10);
-               hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x14);
-               hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x18);
-               hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x1c);
-               hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x20);
-               hw_stats->rx_flow_control_packets +=
-                                               mtk_r32(mac->hw, base + 0x24);
-               hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x28);
-               hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x2c);
-               hw_stats->tx_bytes += mtk_r32(mac->hw, base + 0x30);
-               stats =  mtk_r32(mac->hw, base + 0x34);
-               if (stats)
-                       hw_stats->tx_bytes += (stats << 32);
-               hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x38);
-       } else {
-               hw_stats->tx_bytes += mtk_r32(mac->hw, base);
-               hw_stats->tx_packets += mtk_r32(mac->hw, base + 0x04);
-               hw_stats->tx_skip += mtk_r32(mac->hw, base + 0x08);
-               hw_stats->tx_collisions += mtk_r32(mac->hw, base + 0x0c);
-               hw_stats->rx_bytes += mtk_r32(mac->hw, base + 0x20);
-               hw_stats->rx_packets += mtk_r32(mac->hw, base + 0x24);
-               hw_stats->rx_overflow += mtk_r32(mac->hw, base + 0x28);
-               hw_stats->rx_fcs_errors += mtk_r32(mac->hw, base + 0x2c);
-               hw_stats->rx_short_errors += mtk_r32(mac->hw, base + 0x30);
-               hw_stats->rx_long_errors += mtk_r32(mac->hw, base + 0x34);
-               hw_stats->rx_checksum_errors += mtk_r32(mac->hw, base + 0x38);
-               hw_stats->rx_flow_control_packets +=
-                                               mtk_r32(mac->hw, base + 0x3c);
-       }
-
-       u64_stats_update_end(&hw_stats->syncp);
-}
-
-static void mtk_get_stats64(struct net_device *dev,
-                           struct rtnl_link_stats64 *storage)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_hw_stats *hw_stats = mac->hw_stats;
-       unsigned int base = mtk_reg_table[MTK_REG_MTK_COUNTER_BASE];
-       unsigned int start;
-
-       if (!base) {
-               netdev_stats_to_stats64(storage, &dev->stats);
-               return;
-       }
-
-       if (netif_running(dev) && netif_device_present(dev)) {
-               if (spin_trylock(&hw_stats->stats_lock)) {
-                       mtk_stats_update_mac(mac);
-                       spin_unlock(&hw_stats->stats_lock);
-               }
-       }
-
-       do {
-               start = u64_stats_fetch_begin_irq(&hw_stats->syncp);
-               storage->rx_packets = hw_stats->rx_packets;
-               storage->tx_packets = hw_stats->tx_packets;
-               storage->rx_bytes = hw_stats->rx_bytes;
-               storage->tx_bytes = hw_stats->tx_bytes;
-               storage->collisions = hw_stats->tx_collisions;
-               storage->rx_length_errors = hw_stats->rx_short_errors +
-                       hw_stats->rx_long_errors;
-               storage->rx_over_errors = hw_stats->rx_overflow;
-               storage->rx_crc_errors = hw_stats->rx_fcs_errors;
-               storage->rx_errors = hw_stats->rx_checksum_errors;
-               storage->tx_aborted_errors = hw_stats->tx_skip;
-       } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start));
-
-       storage->tx_errors = dev->stats.tx_errors;
-       storage->rx_dropped = dev->stats.rx_dropped;
-       storage->tx_dropped = dev->stats.tx_dropped;
-}
-
-static int mtk_vlan_rx_add_vid(struct net_device *dev,
-                              __be16 proto, u16 vid)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 idx = (vid & 0xf);
-       u32 vlan_cfg;
-
-       if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-             (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-               return 0;
-
-       if (test_bit(idx, &eth->vlan_map)) {
-               netdev_warn(dev, "disable tx vlan offload\n");
-               dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX;
-               netdev_update_features(dev);
-       } else {
-               vlan_cfg = mtk_r32(eth,
-                                  mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                                  ((idx >> 1) << 2));
-               if (idx & 0x1) {
-                       vlan_cfg &= 0xffff;
-                       vlan_cfg |= (vid << 16);
-               } else {
-                       vlan_cfg &= 0xffff0000;
-                       vlan_cfg |= vid;
-               }
-               mtk_w32(eth,
-                       vlan_cfg, mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                       ((idx >> 1) << 2));
-               set_bit(idx, &eth->vlan_map);
-       }
-
-       return 0;
-}
-
-static int mtk_vlan_rx_kill_vid(struct net_device *dev,
-                               __be16 proto, u16 vid)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 idx = (vid & 0xf);
-
-       if (!((mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE]) &&
-             (dev->features & NETIF_F_HW_VLAN_CTAG_TX)))
-               return 0;
-
-       clear_bit(idx, &eth->vlan_map);
-
-       return 0;
-}
-
-static inline u32 mtk_pdma_empty_txd(struct mtk_tx_ring *ring)
-{
-       barrier();
-       return (u32)(ring->tx_ring_size -
-                    ((ring->tx_next_idx - ring->tx_free_idx) &
-                     (ring->tx_ring_size - 1)));
-}
-
-static int mtk_skb_padto(struct sk_buff *skb, struct mtk_eth *eth)
-{
-       unsigned int len;
-       int ret;
-
-       if (unlikely(skb->len >= VLAN_ETH_ZLEN))
-               return 0;
-
-       if (eth->soc->padding_64b && !eth->soc->padding_bug)
-               return 0;
-
-       if (skb_vlan_tag_present(skb))
-               len = ETH_ZLEN;
-       else if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               len = VLAN_ETH_ZLEN;
-       else if (!eth->soc->padding_64b)
-               len = ETH_ZLEN;
-       else
-               return 0;
-
-       if (skb->len >= len)
-               return 0;
-
-       ret = skb_pad(skb, len - skb->len);
-       if (ret < 0)
-               return ret;
-       skb->len = len;
-       skb_set_tail_pointer(skb, len);
-
-       return ret;
-}
-
-static int mtk_pdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-                          int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct skb_frag_struct *frag;
-       struct mtk_tx_dma txd, *ptxd;
-       struct mtk_tx_buf *tx_buf;
-       int i, j, k, frag_size, frag_map_size, offset;
-       dma_addr_t mapped_addr;
-       unsigned int nr_frags;
-       u32 def_txd4;
-
-       if (mtk_skb_padto(skb, eth)) {
-               netif_warn(eth, tx_err, dev, "tx padding failed!\n");
-               return -1;
-       }
-
-       tx_buf = &ring->tx_buf[ring->tx_next_idx];
-       memset(tx_buf, 0, sizeof(*tx_buf));
-       memset(&txd, 0, sizeof(txd));
-       nr_frags = skb_shinfo(skb)->nr_frags;
-
-       /* init tx descriptor */
-       def_txd4 = eth->soc->txd4;
-       txd.txd4 = def_txd4;
-
-       if (eth->soc->mac_count > 1)
-               txd.txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-       if (gso)
-               txd.txd4 |= TX_DMA_TSO;
-
-       /* TX Checksum offload */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               txd.txd4 |= TX_DMA_CHKSUM;
-
-       /* VLAN header offload */
-       if (skb_vlan_tag_present(skb)) {
-               u16 tag = skb_vlan_tag_get(skb);
-
-               txd.txd4 |= TX_DMA_INS_VLAN |
-                       ((tag >> VLAN_PRIO_SHIFT) << 4) |
-                       (tag & 0xF);
-       }
-
-       mapped_addr = dma_map_single(&dev->dev, skb->data,
-                                    skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-               return -1;
-
-       txd.txd1 = mapped_addr;
-       txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb));
-
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-       /* TX SG offload */
-       j = ring->tx_next_idx;
-       k = 0;
-       for (i = 0; i < nr_frags; i++) {
-               offset = 0;
-               frag = &skb_shinfo(skb)->frags[i];
-               frag_size = skb_frag_size(frag);
-
-               while (frag_size > 0) {
-                       frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-                       mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-                                                      frag_map_size,
-                                                      DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-                               goto err_dma;
-
-                       if (k & 0x1) {
-                               j = NEXT_TX_DESP_IDX(j);
-                               txd.txd1 = mapped_addr;
-                               txd.txd2 = TX_DMA_PLEN0(frag_map_size);
-                               txd.txd4 = def_txd4;
-
-                               tx_buf = &ring->tx_buf[j];
-                               memset(tx_buf, 0, sizeof(*tx_buf));
-
-                               tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-                               dma_unmap_addr_set(tx_buf, dma_addr0,
-                                                  mapped_addr);
-                               dma_unmap_len_set(tx_buf, dma_len0,
-                                                 frag_map_size);
-                       } else {
-                               txd.txd3 = mapped_addr;
-                               txd.txd2 |= TX_DMA_PLEN1(frag_map_size);
-
-                               tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-                               tx_buf->flags |= MTK_TX_FLAGS_PAGE1;
-                               dma_unmap_addr_set(tx_buf, dma_addr1,
-                                                  mapped_addr);
-                               dma_unmap_len_set(tx_buf, dma_len1,
-                                                 frag_map_size);
-
-                               if (!((i == (nr_frags - 1)) &&
-                                     (frag_map_size == frag_size))) {
-                                       mtk_set_txd_pdma(&txd,
-                                                        &ring->tx_dma[j]);
-                                       memset(&txd, 0, sizeof(txd));
-                               }
-                       }
-                       frag_size -= frag_map_size;
-                       offset += frag_map_size;
-                       k++;
-               }
-       }
-
-       /* set last segment */
-       if (k & 0x1)
-               txd.txd2 |= TX_DMA_LS1;
-       else
-               txd.txd2 |= TX_DMA_LS0;
-       mtk_set_txd_pdma(&txd, &ring->tx_dma[j]);
-
-       /* store skb to cleanup */
-       tx_buf->skb = skb;
-
-       netdev_sent_queue(dev, skb->len);
-       skb_tx_timestamp(skb);
-
-       ring->tx_next_idx = NEXT_TX_DESP_IDX(j);
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-               mtk_reg_w32(eth, ring->tx_next_idx, MTK_REG_TX_CTX_IDX0);
-
-       return 0;
-
-err_dma:
-       j = ring->tx_next_idx;
-       for (i = 0; i < tx_num; i++) {
-               ptxd = &ring->tx_dma[j];
-               tx_buf = &ring->tx_buf[j];
-
-               /* unmap dma */
-               mtk_txd_unmap(&dev->dev, tx_buf);
-
-               ptxd->txd2 = TX_DMA_DESP2_DEF;
-               j = NEXT_TX_DESP_IDX(j);
-       }
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-       return -1;
-}
-
-/* the qdma core needs scratch memory to be set up */
-static int mtk_init_fq_dma(struct mtk_eth *eth)
-{
-       dma_addr_t dma_addr, phy_ring_head, phy_ring_tail;
-       int cnt = eth->soc->dma_ring_size;
-       int i;
-
-       eth->scratch_ring = dma_alloc_coherent(eth->dev,
-                                              cnt * sizeof(struct mtk_tx_dma),
-                                              &phy_ring_head,
-                                              GFP_ATOMIC | __GFP_ZERO);
-       if (unlikely(!eth->scratch_ring))
-               return -ENOMEM;
-
-       eth->scratch_head = kcalloc(cnt, QDMA_PAGE_SIZE,
-                                   GFP_KERNEL);
-       dma_addr = dma_map_single(eth->dev,
-                                 eth->scratch_head, cnt * QDMA_PAGE_SIZE,
-                                 DMA_FROM_DEVICE);
-       if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
-               return -ENOMEM;
-
-       memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
-       phy_ring_tail = phy_ring_head + (sizeof(struct mtk_tx_dma) * (cnt - 1));
-
-       for (i = 0; i < cnt; i++) {
-               eth->scratch_ring[i].txd1 = (dma_addr + (i * QDMA_PAGE_SIZE));
-               if (i < cnt - 1)
-                       eth->scratch_ring[i].txd2 = (phy_ring_head +
-                               ((i + 1) * sizeof(struct mtk_tx_dma)));
-               eth->scratch_ring[i].txd3 = TX_QDMA_SDL(QDMA_PAGE_SIZE);
-       }
-
-       mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
-       mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
-       mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
-       mtk_w32(eth, QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
-
-       return 0;
-}
-
-static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
-{
-       void *ret = ring->tx_dma;
-
-       return ret + (desc - ring->tx_phys);
-}
-
-static struct mtk_tx_dma *mtk_tx_next_qdma(struct mtk_tx_ring *ring,
-                                          struct mtk_tx_dma *txd)
-{
-       return mtk_qdma_phys_to_virt(ring, txd->txd2);
-}
-
-static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
-                                            struct mtk_tx_dma *txd)
-{
-       int idx = txd - ring->tx_dma;
-
-       return &ring->tx_buf[idx];
-}
-
-static int mtk_qdma_tx_map(struct sk_buff *skb, struct net_device *dev,
-                          int tx_num, struct mtk_tx_ring *ring, bool gso)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_dma *itxd, *txd;
-       struct mtk_tx_buf *tx_buf;
-       dma_addr_t mapped_addr;
-       unsigned int nr_frags;
-       int i, n_desc = 1;
-       u32 txd4 = eth->soc->txd4;
-
-       itxd = ring->tx_next_free;
-       if (itxd == ring->tx_last_free)
-               return -ENOMEM;
-
-       if (eth->soc->mac_count > 1)
-               txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
-
-       tx_buf = mtk_desc_to_tx_buf(ring, itxd);
-       memset(tx_buf, 0, sizeof(*tx_buf));
-
-       if (gso)
-               txd4 |= TX_DMA_TSO;
-
-       /* TX Checksum offload */
-       if (skb->ip_summed == CHECKSUM_PARTIAL)
-               txd4 |= TX_DMA_CHKSUM;
-
-       /* VLAN header offload */
-       if (skb_vlan_tag_present(skb))
-               txd4 |= TX_DMA_INS_VLAN_MT7621 | skb_vlan_tag_get(skb);
-
-       mapped_addr = dma_map_single(&dev->dev, skb->data,
-                                    skb_headlen(skb), DMA_TO_DEVICE);
-       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-               return -ENOMEM;
-
-       WRITE_ONCE(itxd->txd1, mapped_addr);
-       tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
-       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-       dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb));
-
-       /* TX SG offload */
-       txd = itxd;
-       nr_frags = skb_shinfo(skb)->nr_frags;
-       for (i = 0; i < nr_frags; i++) {
-               struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
-               unsigned int offset = 0;
-               int frag_size = skb_frag_size(frag);
-
-               while (frag_size) {
-                       bool last_frag = false;
-                       unsigned int frag_map_size;
-
-                       txd = mtk_tx_next_qdma(ring, txd);
-                       if (txd == ring->tx_last_free)
-                               goto err_dma;
-
-                       n_desc++;
-                       frag_map_size = min(frag_size, TX_DMA_BUF_LEN);
-                       mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
-                                                      frag_map_size,
-                                                      DMA_TO_DEVICE);
-                       if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
-                               goto err_dma;
-
-                       if (i == nr_frags - 1 &&
-                           (frag_size - frag_map_size) == 0)
-                               last_frag = true;
-
-                       WRITE_ONCE(txd->txd1, mapped_addr);
-                       WRITE_ONCE(txd->txd3, (QDMA_TX_SWC |
-                                              TX_DMA_PLEN0(frag_map_size) |
-                                              last_frag * TX_DMA_LS0) |
-                                              mac->id);
-                       WRITE_ONCE(txd->txd4, 0);
-
-                       tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC;
-                       tx_buf = mtk_desc_to_tx_buf(ring, txd);
-                       memset(tx_buf, 0, sizeof(*tx_buf));
-
-                       tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
-                       dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
-                       dma_unmap_len_set(tx_buf, dma_len0, frag_map_size);
-                       frag_size -= frag_map_size;
-                       offset += frag_map_size;
-               }
-       }
-
-       /* store skb to cleanup */
-       tx_buf->skb = skb;
-
-       WRITE_ONCE(itxd->txd4, txd4);
-       WRITE_ONCE(itxd->txd3, (QDMA_TX_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
-                               (!nr_frags * TX_DMA_LS0)));
-
-       netdev_sent_queue(dev, skb->len);
-       skb_tx_timestamp(skb);
-
-       ring->tx_next_free = mtk_tx_next_qdma(ring, txd);
-       atomic_sub(n_desc, &ring->tx_free_count);
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more)
-               mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR);
-
-       return 0;
-
-err_dma:
-       do {
-               tx_buf = mtk_desc_to_tx_buf(ring, txd);
-
-               /* unmap dma */
-               mtk_txd_unmap(&dev->dev, tx_buf);
-
-               itxd->txd3 = TX_DMA_DESP2_DEF;
-               itxd = mtk_tx_next_qdma(ring, itxd);
-       } while (itxd != txd);
-
-       return -ENOMEM;
-}
-
-static inline int mtk_cal_txd_req(struct sk_buff *skb)
-{
-       int i, nfrags;
-       struct skb_frag_struct *frag;
-
-       nfrags = 1;
-       if (skb_is_gso(skb)) {
-               for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-                       frag = &skb_shinfo(skb)->frags[i];
-                       nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN);
-               }
-       } else {
-               nfrags += skb_shinfo(skb)->nr_frags;
-       }
-
-       return DIV_ROUND_UP(nfrags, 2);
-}
-
-static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct net_device_stats *stats = &dev->stats;
-       int tx_num;
-       int len = skb->len;
-       bool gso = false;
-
-       tx_num = mtk_cal_txd_req(skb);
-       if (unlikely(atomic_read(&ring->tx_free_count) <= tx_num)) {
-               netif_stop_queue(dev);
-               netif_err(eth, tx_queued, dev,
-                         "Tx Ring full when queue awake!\n");
-               return NETDEV_TX_BUSY;
-       }
-
-       /* TSO: fill MSS info in tcp checksum field */
-       if (skb_is_gso(skb)) {
-               if (skb_cow_head(skb, 0)) {
-                       netif_warn(eth, tx_err, dev,
-                                  "GSO expand head fail.\n");
-                       goto drop;
-               }
-
-               if (skb_shinfo(skb)->gso_type &
-                               (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
-                       gso = true;
-                       tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
-               }
-       }
-
-       if (ring->tx_map(skb, dev, tx_num, ring, gso) < 0)
-               goto drop;
-
-       stats->tx_packets++;
-       stats->tx_bytes += len;
-
-       if (unlikely(atomic_read(&ring->tx_free_count) <= ring->tx_thresh)) {
-               netif_stop_queue(dev);
-               smp_mb();
-               if (unlikely(atomic_read(&ring->tx_free_count) >
-                            ring->tx_thresh))
-                       netif_wake_queue(dev);
-       }
-
-       return NETDEV_TX_OK;
-
-drop:
-       stats->tx_dropped++;
-       dev_kfree_skb(skb);
-       return NETDEV_TX_OK;
-}
-
-static int mtk_poll_rx(struct napi_struct *napi, int budget,
-                      struct mtk_eth *eth, u32 rx_intr)
-{
-       struct mtk_soc_data *soc = eth->soc;
-       struct mtk_rx_ring *ring = &eth->rx_ring[0];
-       int idx = ring->rx_calc_idx;
-       u32 checksum_bit;
-       struct sk_buff *skb;
-       u8 *data, *new_data;
-       struct mtk_rx_dma *rxd, trxd;
-       int done = 0, pad;
-
-       if (eth->soc->hw_features & NETIF_F_RXCSUM)
-               checksum_bit = soc->checksum_bit;
-       else
-               checksum_bit = 0;
-
-       if (eth->soc->rx_2b_offset)
-               pad = 0;
-       else
-               pad = NET_IP_ALIGN;
-
-       while (done < budget) {
-               struct net_device *netdev;
-               unsigned int pktlen;
-               dma_addr_t dma_addr;
-               int mac = 0;
-
-               idx = NEXT_RX_DESP_IDX(idx);
-               rxd = &ring->rx_dma[idx];
-               data = ring->rx_data[idx];
-
-               mtk_get_rxd(&trxd, rxd);
-               if (!(trxd.rxd2 & RX_DMA_DONE))
-                       break;
-
-               /* find out which mac the packet comes from. values start at 1 */
-               if (eth->soc->mac_count > 1) {
-                       mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
-                             RX_DMA_FPORT_MASK;
-                       mac--;
-                       if (mac < 0 || mac >= eth->soc->mac_count)
-                               goto release_desc;
-               }
-
-               netdev = eth->netdev[mac];
-
-               /* alloc new buffer */
-               new_data = napi_alloc_frag(ring->frag_size);
-               if (unlikely(!new_data || !netdev)) {
-                       netdev->stats.rx_dropped++;
-                       goto release_desc;
-               }
-               dma_addr = dma_map_single(&netdev->dev,
-                                         new_data + NET_SKB_PAD + pad,
-                                         ring->rx_buf_size,
-                                         DMA_FROM_DEVICE);
-               if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
-                       skb_free_frag(new_data);
-                       goto release_desc;
-               }
-
-               /* receive data */
-               skb = build_skb(data, ring->frag_size);
-               if (unlikely(!skb)) {
-                       put_page(virt_to_head_page(new_data));
-                       goto release_desc;
-               }
-               skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
-
-               dma_unmap_single(&netdev->dev, trxd.rxd1,
-                                ring->rx_buf_size, DMA_FROM_DEVICE);
-               pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
-               skb->dev = netdev;
-               skb_put(skb, pktlen);
-               if (trxd.rxd4 & checksum_bit)
-                       skb->ip_summed = CHECKSUM_UNNECESSARY;
-               else
-                       skb_checksum_none_assert(skb);
-               skb->protocol = eth_type_trans(skb, netdev);
-
-               netdev->stats.rx_packets++;
-               netdev->stats.rx_bytes += pktlen;
-
-               if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX &&
-                   RX_DMA_VID(trxd.rxd3))
-                       __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
-                                              RX_DMA_VID(trxd.rxd3));
-               napi_gro_receive(napi, skb);
-
-               ring->rx_data[idx] = new_data;
-               rxd->rxd1 = (unsigned int)dma_addr;
-
-release_desc:
-               if (eth->soc->rx_sg_dma)
-                       rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size);
-               else
-                       rxd->rxd2 = RX_DMA_LSO;
-
-               ring->rx_calc_idx = idx;
-               /* make sure that all changes to the dma ring are flushed before
-                * we continue
-                */
-               wmb();
-               if (eth->soc->dma_type == MTK_QDMA)
-                       mtk_w32(eth, ring->rx_calc_idx, MTK_QRX_CRX_IDX0);
-               else
-                       mtk_reg_w32(eth, ring->rx_calc_idx,
-                                   MTK_REG_RX_CALC_IDX0);
-               done++;
-       }
-
-       if (done < budget)
-               mtk_irq_ack(eth, rx_intr);
-
-       return done;
-}
-
-static int mtk_pdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-       struct sk_buff *skb;
-       struct mtk_tx_buf *tx_buf;
-       int done = 0;
-       u32 idx, hwidx;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       unsigned int bytes = 0;
-
-       idx = ring->tx_free_idx;
-       hwidx = mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0);
-
-       while ((idx != hwidx) && budget) {
-               tx_buf = &ring->tx_buf[idx];
-               skb = tx_buf->skb;
-
-               if (!skb)
-                       break;
-
-               if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-                       bytes += skb->len;
-                       done++;
-                       budget--;
-               }
-               mtk_txd_unmap(eth->dev, tx_buf);
-               idx = NEXT_TX_DESP_IDX(idx);
-       }
-       ring->tx_free_idx = idx;
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-
-       /* read hw index again to make sure no new tx packets arrived */
-       if (idx != hwidx || idx != mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0))
-               *tx_again = 1;
-
-       if (done)
-               netdev_completed_queue(*eth->netdev, done, bytes);
-
-       return done;
-}
-
-static int mtk_qdma_tx_poll(struct mtk_eth *eth, int budget, bool *tx_again)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct mtk_tx_dma *desc;
-       struct sk_buff *skb;
-       struct mtk_tx_buf *tx_buf;
-       int total = 0, done[MTK_MAX_DEVS];
-       unsigned int bytes[MTK_MAX_DEVS];
-       u32 cpu, dma;
-       int i;
-
-       memset(done, 0, sizeof(done));
-       memset(bytes, 0, sizeof(bytes));
-
-       cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
-       dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
-
-       desc = mtk_qdma_phys_to_virt(ring, cpu);
-
-       while ((cpu != dma) && budget) {
-               u32 next_cpu = desc->txd2;
-               int mac;
-
-               desc = mtk_tx_next_qdma(ring, desc);
-               if ((desc->txd3 & QDMA_TX_OWNER_CPU) == 0)
-                       break;
-
-               mac = (desc->txd4 >> TX_DMA_FPORT_SHIFT) &
-                      TX_DMA_FPORT_MASK;
-               mac--;
-
-               tx_buf = mtk_desc_to_tx_buf(ring, desc);
-               skb = tx_buf->skb;
-               if (!skb)
-                       break;
-
-               if (skb != (struct sk_buff *)DMA_DUMMY_DESC) {
-                       bytes[mac] += skb->len;
-                       done[mac]++;
-                       budget--;
-               }
-               mtk_txd_unmap(eth->dev, tx_buf);
-
-               ring->tx_last_free->txd2 = next_cpu;
-               ring->tx_last_free = desc;
-               atomic_inc(&ring->tx_free_count);
-
-               cpu = next_cpu;
-       }
-
-       mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
-
-       /* read hw index again to make sure no new tx packets arrived */
-       if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
-               *tx_again = true;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               if (!done[i])
-                       continue;
-               netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
-               total += done[i];
-       }
-
-       return total;
-}
-
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, u32 tx_intr,
-                      bool *tx_again)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       struct net_device *netdev = eth->netdev[0];
-       int done;
-
-       done = eth->tx_ring.tx_poll(eth, budget, tx_again);
-       if (!*tx_again)
-               mtk_irq_ack(eth, tx_intr);
-
-       if (!done)
-               return 0;
-
-       smp_mb();
-       if (unlikely(!netif_queue_stopped(netdev)))
-               return done;
-
-       if (atomic_read(&ring->tx_free_count) > ring->tx_thresh)
-               netif_wake_queue(netdev);
-
-       return done;
-}
-
-static void mtk_stats_update(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               if (!eth->mac[i] || !eth->mac[i]->hw_stats)
-                       continue;
-               if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
-                       mtk_stats_update_mac(eth->mac[i]);
-                       spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
-               }
-       }
-}
-
-static int mtk_poll(struct napi_struct *napi, int budget)
-{
-       struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
-       u32 status, mtk_status, mask, tx_intr, rx_intr, status_intr;
-       int tx_done, rx_done;
-       bool tx_again = false;
-
-       status = mtk_irq_pending(eth);
-       mtk_status = mtk_irq_pending_status(eth);
-       tx_intr = eth->soc->tx_int;
-       rx_intr = eth->soc->rx_int;
-       status_intr = eth->soc->status_int;
-       tx_done = 0;
-       rx_done = 0;
-       tx_again = 0;
-
-       if (status & tx_intr)
-               tx_done = mtk_poll_tx(eth, budget, tx_intr, &tx_again);
-
-       if (status & rx_intr)
-               rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
-
-       if (unlikely(mtk_status & status_intr)) {
-               mtk_stats_update(eth);
-               mtk_irq_ack_status(eth, status_intr);
-       }
-
-       if (unlikely(netif_msg_intr(eth))) {
-               mask = mtk_irq_enabled(eth);
-               netdev_info(eth->netdev[0],
-                           "done tx %d, rx %d, intr 0x%08x/0x%x\n",
-                           tx_done, rx_done, status, mask);
-       }
-
-       if (tx_again || rx_done == budget)
-               return budget;
-
-       status = mtk_irq_pending(eth);
-       if (status & (tx_intr | rx_intr))
-               return budget;
-
-       napi_complete(napi);
-       mtk_irq_enable(eth, tx_intr | rx_intr);
-
-       return rx_done;
-}
-
-static int mtk_pdma_tx_alloc(struct mtk_eth *eth)
-{
-       int i;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-
-       ring->tx_ring_size = eth->soc->dma_ring_size;
-       ring->tx_free_idx = 0;
-       ring->tx_next_idx = 0;
-       ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-                             MAX_SKB_FRAGS);
-
-       ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-                              GFP_KERNEL);
-       if (!ring->tx_buf)
-               goto no_tx_mem;
-
-       ring->tx_dma =
-               dma_alloc_coherent(eth->dev,
-                                  ring->tx_ring_size * sizeof(*ring->tx_dma),
-                                  &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->tx_dma)
-               goto no_tx_mem;
-
-       for (i = 0; i < ring->tx_ring_size; i++) {
-               ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF;
-               ring->tx_dma[i].txd4 = eth->soc->txd4;
-       }
-
-       atomic_set(&ring->tx_free_count, mtk_pdma_empty_txd(ring));
-       ring->tx_map = mtk_pdma_tx_map;
-       ring->tx_poll = mtk_pdma_tx_poll;
-       ring->tx_clean = mtk_pdma_tx_clean;
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       mtk_reg_w32(eth, ring->tx_phys, MTK_REG_TX_BASE_PTR0);
-       mtk_reg_w32(eth, ring->tx_ring_size, MTK_REG_TX_MAX_CNT0);
-       mtk_reg_w32(eth, 0, MTK_REG_TX_CTX_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DTX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-
-no_tx_mem:
-       return -ENOMEM;
-}
-
-static int mtk_qdma_tx_alloc_tx(struct mtk_eth *eth)
-{
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i, sz = sizeof(*ring->tx_dma);
-
-       ring->tx_ring_size = eth->soc->dma_ring_size;
-       ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf),
-                              GFP_KERNEL);
-       if (!ring->tx_buf)
-               goto no_tx_mem;
-
-       ring->tx_dma = dma_alloc_coherent(eth->dev, ring->tx_ring_size * sz,
-                                         &ring->tx_phys,
-                                         GFP_ATOMIC | __GFP_ZERO);
-       if (!ring->tx_dma)
-               goto no_tx_mem;
-
-       for (i = 0; i < ring->tx_ring_size; i++) {
-               int next = (i + 1) % ring->tx_ring_size;
-               u32 next_ptr = ring->tx_phys + next * sz;
-
-               ring->tx_dma[i].txd2 = next_ptr;
-               ring->tx_dma[i].txd3 = TX_DMA_DESP2_DEF;
-       }
-
-       atomic_set(&ring->tx_free_count, ring->tx_ring_size - 2);
-       ring->tx_next_free = &ring->tx_dma[0];
-       ring->tx_last_free = &ring->tx_dma[ring->tx_ring_size - 2];
-       ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2,
-                             MAX_SKB_FRAGS);
-
-       ring->tx_map = mtk_qdma_tx_map;
-       ring->tx_poll = mtk_qdma_tx_poll;
-       ring->tx_clean = mtk_qdma_tx_clean;
-
-       /* make sure that all changes to the dma ring are flushed before we
-        * continue
-        */
-       wmb();
-
-       mtk_w32(eth, ring->tx_phys, MTK_QTX_CTX_PTR);
-       mtk_w32(eth, ring->tx_phys, MTK_QTX_DTX_PTR);
-       mtk_w32(eth,
-               ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-               MTK_QTX_CRX_PTR);
-       mtk_w32(eth,
-               ring->tx_phys + ((ring->tx_ring_size - 1) * sz),
-               MTK_QTX_DRX_PTR);
-
-       return 0;
-
-no_tx_mem:
-       return -ENOMEM;
-}
-
-static int mtk_qdma_init(struct mtk_eth *eth, int ring)
-{
-       int err;
-
-       err = mtk_init_fq_dma(eth);
-       if (err)
-               return err;
-
-       err = mtk_qdma_tx_alloc_tx(eth);
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, &eth->rx_ring[ring]);
-       if (err)
-               return err;
-
-       mtk_w32(eth, eth->rx_ring[ring].rx_phys, MTK_QRX_BASE_PTR0);
-       mtk_w32(eth, eth->rx_ring[ring].rx_ring_size, MTK_QRX_MAX_CNT0);
-       mtk_w32(eth, eth->rx_ring[ring].rx_calc_idx, MTK_QRX_CRX_IDX0);
-       mtk_w32(eth, MTK_PST_DRX_IDX0, MTK_QDMA_RST_IDX);
-       mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, MTK_QTX_CFG(0));
-
-       /* Enable random early drop and set drop threshold automatically */
-       mtk_w32(eth, 0x174444, MTK_QDMA_FC_THRES);
-       mtk_w32(eth, 0x0, MTK_QDMA_HRED2);
-
-       return 0;
-}
-
-static int mtk_pdma_qdma_init(struct mtk_eth *eth)
-{
-       int err = mtk_qdma_init(eth, 1);
-
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, &eth->rx_ring[0]);
-       if (err)
-               return err;
-
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_phys, MTK_REG_RX_BASE_PTR0);
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_ring_size, MTK_REG_RX_MAX_CNT0);
-       mtk_reg_w32(eth, eth->rx_ring[0].rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-}
-
-static int mtk_pdma_init(struct mtk_eth *eth)
-{
-       struct mtk_rx_ring *ring = &eth->rx_ring[0];
-       int err;
-
-       err = mtk_pdma_tx_alloc(eth);
-       if (err)
-               return err;
-
-       err = mtk_dma_rx_alloc(eth, ring);
-       if (err)
-               return err;
-
-       mtk_reg_w32(eth, ring->rx_phys, MTK_REG_RX_BASE_PTR0);
-       mtk_reg_w32(eth, ring->rx_ring_size, MTK_REG_RX_MAX_CNT0);
-       mtk_reg_w32(eth, ring->rx_calc_idx, MTK_REG_RX_CALC_IDX0);
-       mtk_reg_w32(eth, MTK_PST_DRX_IDX0, MTK_REG_PDMA_RST_CFG);
-
-       return 0;
-}
-
-static void mtk_dma_free(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++)
-               if (eth->netdev[i])
-                       netdev_reset_queue(eth->netdev[i]);
-       eth->tx_ring.tx_clean(eth);
-       mtk_clean_rx(eth, &eth->rx_ring[0]);
-       mtk_clean_rx(eth, &eth->rx_ring[1]);
-       kfree(eth->scratch_head);
-}
-
-static void mtk_tx_timeout(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct mtk_tx_ring *ring = &eth->tx_ring;
-
-       eth->netdev[mac->id]->stats.tx_errors++;
-       netif_err(eth, tx_err, dev,
-                 "transmit timed out\n");
-       if (eth->soc->dma_type & MTK_PDMA) {
-               netif_info(eth, drv, dev, "pdma_cfg:%08x\n",
-                          mtk_reg_r32(eth, MTK_REG_PDMA_GLO_CFG));
-               netif_info(eth, drv, dev,
-                          "tx_ring=%d, base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n",
-                          0, mtk_reg_r32(eth, MTK_REG_TX_BASE_PTR0),
-                          mtk_reg_r32(eth, MTK_REG_TX_MAX_CNT0),
-                          mtk_reg_r32(eth, MTK_REG_TX_CTX_IDX0),
-                          mtk_reg_r32(eth, MTK_REG_TX_DTX_IDX0),
-                          ring->tx_free_idx,
-                          ring->tx_next_idx);
-       }
-       if (eth->soc->dma_type & MTK_QDMA) {
-               netif_info(eth, drv, dev, "qdma_cfg:%08x\n",
-                          mtk_r32(eth, MTK_QDMA_GLO_CFG));
-               netif_info(eth, drv, dev,
-                          "tx_ring=%d, ctx=%08x, dtx=%08x, crx=%08x, drx=%08x, free=%hu\n",
-                          0, mtk_r32(eth, MTK_QTX_CTX_PTR),
-                          mtk_r32(eth, MTK_QTX_DTX_PTR),
-                          mtk_r32(eth, MTK_QTX_CRX_PTR),
-                          mtk_r32(eth, MTK_QTX_DRX_PTR),
-                          atomic_read(&ring->tx_free_count));
-       }
-       netif_info(eth, drv, dev,
-                  "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n",
-                  0, mtk_reg_r32(eth, MTK_REG_RX_BASE_PTR0),
-                  mtk_reg_r32(eth, MTK_REG_RX_MAX_CNT0),
-                  mtk_reg_r32(eth, MTK_REG_RX_CALC_IDX0),
-                  mtk_reg_r32(eth, MTK_REG_RX_DRX_IDX0));
-
-       schedule_work(&mac->pending_work);
-}
-
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
-{
-       struct mtk_eth *eth = _eth;
-       u32 status, int_mask;
-
-       status = mtk_irq_pending(eth);
-       if (unlikely(!status))
-               return IRQ_NONE;
-
-       int_mask = (eth->soc->rx_int | eth->soc->tx_int);
-       if (likely(status & int_mask)) {
-               if (likely(napi_schedule_prep(&eth->rx_napi)))
-                       __napi_schedule(&eth->rx_napi);
-       } else {
-               mtk_irq_ack(eth, status);
-       }
-       mtk_irq_disable(eth, int_mask);
-
-       return IRQ_HANDLED;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void mtk_poll_controller(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       u32 int_mask = eth->soc->tx_int | eth->soc->rx_int;
-
-       mtk_irq_disable(eth, int_mask);
-       mtk_handle_irq(dev->irq, dev);
-       mtk_irq_enable(eth, int_mask);
-}
-#endif
-
-int mtk_set_clock_cycle(struct mtk_eth *eth)
-{
-       unsigned long sysclk = eth->sysclk;
-
-       sysclk /= MTK_US_CYC_CNT_DIVISOR;
-       sysclk <<= MTK_US_CYC_CNT_SHIFT;
-
-       mtk_w32(eth, (mtk_r32(eth, MTK_GLO_CFG) &
-                       ~(MTK_US_CYC_CNT_MASK << MTK_US_CYC_CNT_SHIFT)) |
-                       sysclk,
-                       MTK_GLO_CFG);
-       return 0;
-}
-
-void mtk_fwd_config(struct mtk_eth *eth)
-{
-       u32 fwd_cfg;
-
-       fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-
-       /* disable jumbo frame */
-       if (eth->soc->jumbo_frame)
-               fwd_cfg &= ~MTK_GDM1_JMB_EN;
-
-       /* forward unicast/multicast/broadcast frames to the cpu */
-       fwd_cfg &= ~0xffff;
-
-       mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-}
-
-void mtk_csum_config(struct mtk_eth *eth)
-{
-       if (eth->soc->hw_features & NETIF_F_RXCSUM)
-               mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) |
-                       (MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-                       MTK_GDMA1_FWD_CFG);
-       else
-               mtk_w32(eth, mtk_r32(eth, MTK_GDMA1_FWD_CFG) &
-                       ~(MTK_GDM1_ICS_EN | MTK_GDM1_TCS_EN | MTK_GDM1_UCS_EN),
-                       MTK_GDMA1_FWD_CFG);
-       if (eth->soc->hw_features & NETIF_F_IP_CSUM)
-               mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) |
-                       (MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-                       MTK_CDMA_CSG_CFG);
-       else
-               mtk_w32(eth, mtk_r32(eth, MTK_CDMA_CSG_CFG) &
-                       ~(MTK_ICS_GEN_EN | MTK_TCS_GEN_EN | MTK_UCS_GEN_EN),
-                       MTK_CDMA_CSG_CFG);
-}
-
-static int mtk_start_dma(struct mtk_eth *eth)
-{
-       unsigned long flags;
-       u32 val;
-       int err;
-
-       if (eth->soc->dma_type == MTK_PDMA)
-               err = mtk_pdma_init(eth);
-       else if (eth->soc->dma_type == MTK_QDMA)
-               err = mtk_qdma_init(eth, 0);
-       else
-               err = mtk_pdma_qdma_init(eth);
-       if (err) {
-               mtk_dma_free(eth);
-               return err;
-       }
-
-       spin_lock_irqsave(&eth->page_lock, flags);
-
-       val = MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN;
-       if (eth->soc->rx_2b_offset)
-               val |= MTK_RX_2B_OFFSET;
-       val |= eth->soc->pdma_glo_cfg;
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_reg_w32(eth, val, MTK_REG_PDMA_GLO_CFG);
-
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, val, MTK_QDMA_GLO_CFG);
-
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
-       return 0;
-}
-
-static int mtk_open(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       dma_coerce_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32));
-
-       if (!atomic_read(&eth->dma_refcnt)) {
-               int err = mtk_start_dma(eth);
-
-               if (err)
-                       return err;
-
-               napi_enable(&eth->rx_napi);
-               mtk_irq_enable(eth, eth->soc->tx_int | eth->soc->rx_int);
-       }
-       atomic_inc(&eth->dma_refcnt);
-
-       if (eth->phy)
-               eth->phy->start(mac);
-
-       if (eth->soc->has_carrier && eth->soc->has_carrier(eth))
-               netif_carrier_on(dev);
-
-       netif_start_queue(dev);
-       eth->soc->fwd_config(eth);
-
-       return 0;
-}
-
-static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
-{
-       unsigned long flags;
-       u32 val;
-       int i;
-
-       /* stop the dma engine */
-       spin_lock_irqsave(&eth->page_lock, flags);
-       val = mtk_r32(eth, glo_cfg);
-       mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
-               glo_cfg);
-       spin_unlock_irqrestore(&eth->page_lock, flags);
-
-       /* wait for dma stop */
-       for (i = 0; i < 10; i++) {
-               val = mtk_r32(eth, glo_cfg);
-               if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
-                       msleep(20);
-                       continue;
-               }
-               break;
-       }
-}
-
-static int mtk_stop(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       netif_tx_disable(dev);
-       if (eth->phy)
-               eth->phy->stop(mac);
-
-       if (!atomic_dec_and_test(&eth->dma_refcnt))
-               return 0;
-
-       mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-       napi_disable(&eth->rx_napi);
-
-       if (eth->soc->dma_type & MTK_PDMA)
-               mtk_stop_dma(eth, mtk_reg_table[MTK_REG_PDMA_GLO_CFG]);
-
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
-
-       mtk_dma_free(eth);
-
-       return 0;
-}
-
-static int __init mtk_init_hw(struct mtk_eth *eth)
-{
-       int i, err;
-
-       eth->soc->reset_fe(eth);
-
-       if (eth->soc->switch_init)
-               if (eth->soc->switch_init(eth)) {
-                       dev_err(eth->dev, "failed to initialize switch core\n");
-                       return -ENODEV;
-               }
-
-       err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
-                              dev_name(eth->dev), eth);
-       if (err)
-               return err;
-
-       err = mtk_mdio_init(eth);
-       if (err)
-               return err;
-
-       /* disable delay and normal interrupt */
-       mtk_reg_w32(eth, 0, MTK_REG_DLY_INT_CFG);
-       if (eth->soc->dma_type & MTK_QDMA)
-               mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
-       mtk_irq_disable(eth, eth->soc->tx_int | eth->soc->rx_int);
-
-       /* frame engine will push VLAN tag according to VIDX field in Tx desc */
-       if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-               for (i = 0; i < 16; i += 2)
-                       mtk_w32(eth, ((i + 1) << 16) + i,
-                               mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE] +
-                               (i * 2));
-
-       if (eth->soc->fwd_config(eth))
-               dev_err(eth->dev, "unable to get clock\n");
-
-       if (mtk_reg_table[MTK_REG_MTK_RST_GL]) {
-               mtk_reg_w32(eth, 1, MTK_REG_MTK_RST_GL);
-               mtk_reg_w32(eth, 0, MTK_REG_MTK_RST_GL);
-       }
-
-       return 0;
-}
-
-static int __init mtk_init(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       struct device_node *port;
-       const char *mac_addr;
-       int err;
-
-       mac_addr = of_get_mac_address(mac->of_node);
-       if (mac_addr)
-               ether_addr_copy(dev->dev_addr, mac_addr);
-
-       /* If the mac address is invalid, use a random mac address */
-       if (!is_valid_ether_addr(dev->dev_addr)) {
-               eth_hw_addr_random(dev);
-               dev_err(eth->dev, "generated random MAC address %pM\n",
-                       dev->dev_addr);
-       }
-       mac->hw->soc->set_mac(mac, dev->dev_addr);
-
-       if (eth->soc->port_init)
-               for_each_child_of_node(mac->of_node, port)
-                       if (of_device_is_compatible(port,
-                                                   "mediatek,eth-port") &&
-                           of_device_is_available(port))
-                               eth->soc->port_init(eth, mac, port);
-
-       if (eth->phy) {
-               err = eth->phy->connect(mac);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static void mtk_uninit(struct net_device *dev)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-
-       if (eth->phy)
-               eth->phy->disconnect(mac);
-       mtk_mdio_cleanup(eth);
-
-       mtk_irq_disable(eth, ~0);
-       free_irq(dev->irq, dev);
-}
-
-static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-
-       if (!mac->phy_dev)
-               return -ENODEV;
-
-       switch (cmd) {
-       case SIOCGMIIPHY:
-       case SIOCGMIIREG:
-       case SIOCSMIIREG:
-               return phy_mii_ioctl(mac->phy_dev, ifr, cmd);
-       default:
-               break;
-       }
-
-       return -EOPNOTSUPP;
-}
-
-static int mtk_change_mtu(struct net_device *dev, int new_mtu)
-{
-       struct mtk_mac *mac = netdev_priv(dev);
-       struct mtk_eth *eth = mac->hw;
-       int frag_size, old_mtu;
-       u32 fwd_cfg;
-
-       if (!eth->soc->jumbo_frame)
-               return eth_change_mtu(dev, new_mtu);
-
-       frag_size = mtk_max_frag_size(new_mtu);
-       if (new_mtu < 68 || frag_size > PAGE_SIZE)
-               return -EINVAL;
-
-       old_mtu = dev->mtu;
-       dev->mtu = new_mtu;
-
-       /* return early if the buffer sizes will not change */
-       if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
-               return 0;
-       if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN)
-               return 0;
-
-       if (new_mtu <= ETH_DATA_LEN)
-               eth->rx_ring[0].frag_size = mtk_max_frag_size(ETH_DATA_LEN);
-       else
-               eth->rx_ring[0].frag_size = PAGE_SIZE;
-       eth->rx_ring[0].rx_buf_size =
-                               mtk_max_buf_size(eth->rx_ring[0].frag_size);
-
-       if (!netif_running(dev))
-               return 0;
-
-       mtk_stop(dev);
-       fwd_cfg = mtk_r32(eth, MTK_GDMA1_FWD_CFG);
-       if (new_mtu <= ETH_DATA_LEN) {
-               fwd_cfg &= ~MTK_GDM1_JMB_EN;
-       } else {
-               fwd_cfg &= ~(MTK_GDM1_JMB_LEN_MASK << MTK_GDM1_JMB_LEN_SHIFT);
-               fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) <<
-                               MTK_GDM1_JMB_LEN_SHIFT) | MTK_GDM1_JMB_EN;
-       }
-       mtk_w32(eth, fwd_cfg, MTK_GDMA1_FWD_CFG);
-
-       return mtk_open(dev);
-}
-
-static void mtk_pending_work(struct work_struct *work)
-{
-       struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
-       struct mtk_eth *eth = mac->hw;
-       struct net_device *dev = eth->netdev[mac->id];
-       int err;
-
-       rtnl_lock();
-       mtk_stop(dev);
-
-       err = mtk_open(dev);
-       if (err) {
-               netif_alert(eth, ifup, dev,
-                           "Driver up/down cycle failed, closing device.\n");
-               dev_close(dev);
-       }
-       rtnl_unlock();
-}
-
-static int mtk_cleanup(struct mtk_eth *eth)
-{
-       int i;
-
-       for (i = 0; i < eth->soc->mac_count; i++) {
-               struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
-               if (!eth->netdev[i])
-                       continue;
-
-               unregister_netdev(eth->netdev[i]);
-               free_netdev(eth->netdev[i]);
-               cancel_work_sync(&mac->pending_work);
-       }
-
-       return 0;
-}
-
-static const struct net_device_ops mtk_netdev_ops = {
-       .ndo_init               = mtk_init,
-       .ndo_uninit             = mtk_uninit,
-       .ndo_open               = mtk_open,
-       .ndo_stop               = mtk_stop,
-       .ndo_start_xmit         = mtk_start_xmit,
-       .ndo_set_mac_address    = mtk_set_mac_address,
-       .ndo_validate_addr      = eth_validate_addr,
-       .ndo_do_ioctl           = mtk_do_ioctl,
-       .ndo_change_mtu         = mtk_change_mtu,
-       .ndo_tx_timeout         = mtk_tx_timeout,
-       .ndo_get_stats64        = mtk_get_stats64,
-       .ndo_vlan_rx_add_vid    = mtk_vlan_rx_add_vid,
-       .ndo_vlan_rx_kill_vid   = mtk_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = mtk_poll_controller,
-#endif
-};
-
-static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
-{
-       struct mtk_mac *mac;
-       const __be32 *_id = of_get_property(np, "reg", NULL);
-       int id, err;
-
-       if (!_id) {
-               dev_err(eth->dev, "missing mac id\n");
-               return -EINVAL;
-       }
-       id = be32_to_cpup(_id);
-       if (id >= eth->soc->mac_count || eth->netdev[id]) {
-               dev_err(eth->dev, "%d is not a valid mac id\n", id);
-               return -EINVAL;
-       }
-
-       eth->netdev[id] = alloc_etherdev(sizeof(*mac));
-       if (!eth->netdev[id]) {
-               dev_err(eth->dev, "alloc_etherdev failed\n");
-               return -ENOMEM;
-       }
-       mac = netdev_priv(eth->netdev[id]);
-       eth->mac[id] = mac;
-       mac->id = id;
-       mac->hw = eth;
-       mac->of_node = np;
-       INIT_WORK(&mac->pending_work, mtk_pending_work);
-
-       if (mtk_reg_table[MTK_REG_MTK_COUNTER_BASE]) {
-               mac->hw_stats = devm_kzalloc(eth->dev,
-                                            sizeof(*mac->hw_stats),
-                                            GFP_KERNEL);
-               if (!mac->hw_stats) {
-                       err = -ENOMEM;
-                       goto free_netdev;
-               }
-               spin_lock_init(&mac->hw_stats->stats_lock);
-               mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
-       }
-
-       SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-       eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
-       eth->netdev[id]->base_addr = (unsigned long)eth->base;
-
-       if (eth->soc->init_data)
-               eth->soc->init_data(eth->soc, eth->netdev[id]);
-
-       eth->netdev[id]->vlan_features = eth->soc->hw_features &
-               ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX);
-       eth->netdev[id]->features |= eth->soc->hw_features;
-
-       if (mtk_reg_table[MTK_REG_MTK_DMA_VID_BASE])
-               eth->netdev[id]->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
-       mtk_set_ethtool_ops(eth->netdev[id]);
-
-       err = register_netdev(eth->netdev[id]);
-       if (err) {
-               dev_err(eth->dev, "error bringing up device\n");
-               err = -ENOMEM;
-               goto free_netdev;
-       }
-       eth->netdev[id]->irq = eth->irq;
-       netif_info(eth, probe, eth->netdev[id],
-                  "mediatek frame engine at 0x%08lx, irq %d\n",
-                  eth->netdev[id]->base_addr, eth->netdev[id]->irq);
-
-       return 0;
-
-free_netdev:
-       free_netdev(eth->netdev[id]);
-       return err;
-}
-
-static int mtk_probe(struct platform_device *pdev)
-{
-       struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       const struct of_device_id *match;
-       struct device_node *mac_np;
-       struct mtk_soc_data *soc;
-       struct mtk_eth *eth;
-       struct clk *sysclk;
-       int err;
-
-       device_reset(&pdev->dev);
-
-       match = of_match_device(of_mtk_match, &pdev->dev);
-       soc = (struct mtk_soc_data *)match->data;
-
-       if (soc->reg_table)
-               mtk_reg_table = soc->reg_table;
-
-       eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
-       if (!eth)
-               return -ENOMEM;
-
-       eth->base = devm_ioremap_resource(&pdev->dev, res);
-       if (IS_ERR(eth->base))
-               return PTR_ERR(eth->base);
-
-       spin_lock_init(&eth->page_lock);
-
-       eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
-                                                     "mediatek,ethsys");
-       if (IS_ERR(eth->ethsys))
-               return PTR_ERR(eth->ethsys);
-
-       eth->irq = platform_get_irq(pdev, 0);
-       if (eth->irq < 0) {
-               dev_err(&pdev->dev, "no IRQ resource found\n");
-               return -ENXIO;
-       }
-
-       sysclk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(sysclk)) {
-               dev_err(&pdev->dev,
-                       "the clock is not defined in the devicetree\n");
-               return -ENXIO;
-       }
-       eth->sysclk = clk_get_rate(sysclk);
-
-       eth->switch_np = of_parse_phandle(pdev->dev.of_node,
-                                         "mediatek,switch", 0);
-       if (soc->has_switch && !eth->switch_np) {
-               dev_err(&pdev->dev, "failed to read switch phandle\n");
-               return -ENODEV;
-       }
-
-       eth->dev = &pdev->dev;
-       eth->soc = soc;
-       eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
-
-       err = mtk_init_hw(eth);
-       if (err)
-               return err;
-
-       if (eth->soc->mac_count > 1) {
-               for_each_child_of_node(pdev->dev.of_node, mac_np) {
-                       if (!of_device_is_compatible(mac_np,
-                                                    "mediatek,eth-mac"))
-                               continue;
-
-                       if (!of_device_is_available(mac_np))
-                               continue;
-
-                       err = mtk_add_mac(eth, mac_np);
-                       if (err)
-                               goto err_free_dev;
-               }
-
-               init_dummy_netdev(&eth->dummy_dev);
-               netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
-                              soc->napi_weight);
-       } else {
-               err = mtk_add_mac(eth, pdev->dev.of_node);
-               if (err)
-                       goto err_free_dev;
-               netif_napi_add(eth->netdev[0], &eth->rx_napi, mtk_poll,
-                              soc->napi_weight);
-       }
-
-       platform_set_drvdata(pdev, eth);
-
-       return 0;
-
-err_free_dev:
-       mtk_cleanup(eth);
-       return err;
-}
-
-static int mtk_remove(struct platform_device *pdev)
-{
-       struct mtk_eth *eth = platform_get_drvdata(pdev);
-
-       netif_napi_del(&eth->rx_napi);
-       mtk_cleanup(eth);
-       platform_set_drvdata(pdev, NULL);
-
-       return 0;
-}
-
-static struct platform_driver mtk_driver = {
-       .probe = mtk_probe,
-       .remove = mtk_remove,
-       .driver = {
-               .name = "mtk_soc_eth",
-               .of_match_table = of_mtk_match,
-       },
-};
-
-module_platform_driver(mtk_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
-MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/staging/mt7621-eth/mtk_eth_soc.h b/drivers/staging/mt7621-eth/mtk_eth_soc.h
deleted file mode 100644 (file)
index e6ed804..0000000
+++ /dev/null
@@ -1,716 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#ifndef MTK_ETH_H
-#define MTK_ETH_H
-
-#include <linux/mii.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/dma-mapping.h>
-#include <linux/phy.h>
-#include <linux/ethtool.h>
-#include <linux/version.h>
-#include <linux/atomic.h>
-
-/* these registers have different offsets depending on the SoC. we use a lookup
- * table for these
- */
-enum mtk_reg {
-       MTK_REG_PDMA_GLO_CFG = 0,
-       MTK_REG_PDMA_RST_CFG,
-       MTK_REG_DLY_INT_CFG,
-       MTK_REG_TX_BASE_PTR0,
-       MTK_REG_TX_MAX_CNT0,
-       MTK_REG_TX_CTX_IDX0,
-       MTK_REG_TX_DTX_IDX0,
-       MTK_REG_RX_BASE_PTR0,
-       MTK_REG_RX_MAX_CNT0,
-       MTK_REG_RX_CALC_IDX0,
-       MTK_REG_RX_DRX_IDX0,
-       MTK_REG_MTK_INT_ENABLE,
-       MTK_REG_MTK_INT_STATUS,
-       MTK_REG_MTK_DMA_VID_BASE,
-       MTK_REG_MTK_COUNTER_BASE,
-       MTK_REG_MTK_RST_GL,
-       MTK_REG_MTK_INT_STATUS2,
-       MTK_REG_COUNT
-};
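
The enum above only names logical registers; the SoC-specific offsets are looked up at runtime through mtk_reg_table (filled from mtk_soc_data->reg_table in mtk_probe(), as the deleted probe code above shows). A minimal sketch of how such a lookup is typically consumed — mtk_r32()/mtk_w32() are the driver's own register I/O wrappers, while the two helper names here are illustrative only:

    /* Illustrative sketch only: resolve a logical register through the
     * per-SoC offset table before touching the hardware.
     */
    static inline u32 mtk_reg_r32(struct mtk_eth *eth, enum mtk_reg reg)
    {
            return mtk_r32(eth, mtk_reg_table[reg]);
    }

    static inline void mtk_reg_w32(struct mtk_eth *eth, u32 val, enum mtk_reg reg)
    {
            mtk_w32(eth, val, mtk_reg_table[reg]);
    }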
-
-/* delayed interrupt bits */
-#define MTK_DELAY_EN_INT       0x80
-#define MTK_DELAY_MAX_INT      0x04
-#define MTK_DELAY_MAX_TOUT     0x04
-#define MTK_DELAY_TIME         20
-#define MTK_DELAY_CHAN         (((MTK_DELAY_EN_INT | MTK_DELAY_MAX_INT) << 8) \
-                                | MTK_DELAY_MAX_TOUT)
-#define MTK_DELAY_INIT         ((MTK_DELAY_CHAN << 16) | MTK_DELAY_CHAN)
-#define MTK_PSE_FQFC_CFG_INIT  0x80504000
-#define MTK_PSE_FQFC_CFG_256Q  0xff908000
-
-/* interrupt bits */
-#define MTK_CNT_PPE_AF         BIT(31)
-#define MTK_CNT_GDM_AF         BIT(29)
-#define MTK_PSE_P2_FC          BIT(26)
-#define MTK_PSE_BUF_DROP       BIT(24)
-#define MTK_GDM_OTHER_DROP     BIT(23)
-#define MTK_PSE_P1_FC          BIT(22)
-#define MTK_PSE_P0_FC          BIT(21)
-#define MTK_PSE_FQ_EMPTY       BIT(20)
-#define MTK_GE1_STA_CHG                BIT(18)
-#define MTK_TX_COHERENT                BIT(17)
-#define MTK_RX_COHERENT                BIT(16)
-#define MTK_TX_DONE_INT3       BIT(11)
-#define MTK_TX_DONE_INT2       BIT(10)
-#define MTK_TX_DONE_INT1       BIT(9)
-#define MTK_TX_DONE_INT0       BIT(8)
-#define MTK_RX_DONE_INT0       BIT(2)
-#define MTK_TX_DLY_INT         BIT(1)
-#define MTK_RX_DLY_INT         BIT(0)
-
-#define MTK_RX_DONE_INT                MTK_RX_DONE_INT0
-#define MTK_TX_DONE_INT                (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
-                                MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
-
-#define RT5350_RX_DLY_INT      BIT(30)
-#define RT5350_TX_DLY_INT      BIT(28)
-#define RT5350_RX_DONE_INT1    BIT(17)
-#define RT5350_RX_DONE_INT0    BIT(16)
-#define RT5350_TX_DONE_INT3    BIT(3)
-#define RT5350_TX_DONE_INT2    BIT(2)
-#define RT5350_TX_DONE_INT1    BIT(1)
-#define RT5350_TX_DONE_INT0    BIT(0)
-
-#define RT5350_RX_DONE_INT     (RT5350_RX_DONE_INT0 | RT5350_RX_DONE_INT1)
-#define RT5350_TX_DONE_INT     (RT5350_TX_DONE_INT0 | RT5350_TX_DONE_INT1 | \
-                                RT5350_TX_DONE_INT2 | RT5350_TX_DONE_INT3)
-
-/* registers */
-#define MTK_GDMA_OFFSET                0x0020
-#define MTK_PSE_OFFSET         0x0040
-#define MTK_GDMA2_OFFSET       0x0060
-#define MTK_CDMA_OFFSET                0x0080
-#define MTK_DMA_VID0           0x00a8
-#define MTK_PDMA_OFFSET                0x0100
-#define MTK_PPE_OFFSET         0x0200
-#define MTK_CMTABLE_OFFSET     0x0400
-#define MTK_POLICYTABLE_OFFSET 0x1000
-
-#define MT7621_GDMA_OFFSET     0x0500
-#define MT7620_GDMA_OFFSET     0x0600
-
-#define RT5350_PDMA_OFFSET     0x0800
-#define RT5350_SDM_OFFSET      0x0c00
-
-#define MTK_MDIO_ACCESS                0x00
-#define MTK_MDIO_CFG           0x04
-#define MTK_GLO_CFG            0x08
-#define MTK_RST_GL             0x0C
-#define MTK_INT_STATUS         0x10
-#define MTK_INT_ENABLE         0x14
-#define MTK_MDIO_CFG2          0x18
-#define MTK_FOC_TS_T           0x1C
-
-#define        MTK_GDMA1_FWD_CFG       (MTK_GDMA_OFFSET + 0x00)
-#define MTK_GDMA1_SCH_CFG      (MTK_GDMA_OFFSET + 0x04)
-#define MTK_GDMA1_SHPR_CFG     (MTK_GDMA_OFFSET + 0x08)
-#define MTK_GDMA1_MAC_ADRL     (MTK_GDMA_OFFSET + 0x0C)
-#define MTK_GDMA1_MAC_ADRH     (MTK_GDMA_OFFSET + 0x10)
-
-#define        MTK_GDMA2_FWD_CFG       (MTK_GDMA2_OFFSET + 0x00)
-#define MTK_GDMA2_SCH_CFG      (MTK_GDMA2_OFFSET + 0x04)
-#define MTK_GDMA2_SHPR_CFG     (MTK_GDMA2_OFFSET + 0x08)
-#define MTK_GDMA2_MAC_ADRL     (MTK_GDMA2_OFFSET + 0x0C)
-#define MTK_GDMA2_MAC_ADRH     (MTK_GDMA2_OFFSET + 0x10)
-
-#define MTK_PSE_FQ_CFG         (MTK_PSE_OFFSET + 0x00)
-#define MTK_CDMA_FC_CFG                (MTK_PSE_OFFSET + 0x04)
-#define MTK_GDMA1_FC_CFG       (MTK_PSE_OFFSET + 0x08)
-#define MTK_GDMA2_FC_CFG       (MTK_PSE_OFFSET + 0x0C)
-
-#define MTK_CDMA_CSG_CFG       (MTK_CDMA_OFFSET + 0x00)
-#define MTK_CDMA_SCH_CFG       (MTK_CDMA_OFFSET + 0x04)
-
-#define        MT7621_GDMA_FWD_CFG(x)  (MT7621_GDMA_OFFSET + (x * 0x1000))
-
-/* FIXME this might be different for different SOCs */
-#define        MT7620_GDMA1_FWD_CFG    (MT7621_GDMA_OFFSET + 0x00)
-
-#define RT5350_TX_BASE_PTR0    (RT5350_PDMA_OFFSET + 0x00)
-#define RT5350_TX_MAX_CNT0     (RT5350_PDMA_OFFSET + 0x04)
-#define RT5350_TX_CTX_IDX0     (RT5350_PDMA_OFFSET + 0x08)
-#define RT5350_TX_DTX_IDX0     (RT5350_PDMA_OFFSET + 0x0C)
-#define RT5350_TX_BASE_PTR1    (RT5350_PDMA_OFFSET + 0x10)
-#define RT5350_TX_MAX_CNT1     (RT5350_PDMA_OFFSET + 0x14)
-#define RT5350_TX_CTX_IDX1     (RT5350_PDMA_OFFSET + 0x18)
-#define RT5350_TX_DTX_IDX1     (RT5350_PDMA_OFFSET + 0x1C)
-#define RT5350_TX_BASE_PTR2    (RT5350_PDMA_OFFSET + 0x20)
-#define RT5350_TX_MAX_CNT2     (RT5350_PDMA_OFFSET + 0x24)
-#define RT5350_TX_CTX_IDX2     (RT5350_PDMA_OFFSET + 0x28)
-#define RT5350_TX_DTX_IDX2     (RT5350_PDMA_OFFSET + 0x2C)
-#define RT5350_TX_BASE_PTR3    (RT5350_PDMA_OFFSET + 0x30)
-#define RT5350_TX_MAX_CNT3     (RT5350_PDMA_OFFSET + 0x34)
-#define RT5350_TX_CTX_IDX3     (RT5350_PDMA_OFFSET + 0x38)
-#define RT5350_TX_DTX_IDX3     (RT5350_PDMA_OFFSET + 0x3C)
-#define RT5350_RX_BASE_PTR0    (RT5350_PDMA_OFFSET + 0x100)
-#define RT5350_RX_MAX_CNT0     (RT5350_PDMA_OFFSET + 0x104)
-#define RT5350_RX_CALC_IDX0    (RT5350_PDMA_OFFSET + 0x108)
-#define RT5350_RX_DRX_IDX0     (RT5350_PDMA_OFFSET + 0x10C)
-#define RT5350_RX_BASE_PTR1    (RT5350_PDMA_OFFSET + 0x110)
-#define RT5350_RX_MAX_CNT1     (RT5350_PDMA_OFFSET + 0x114)
-#define RT5350_RX_CALC_IDX1    (RT5350_PDMA_OFFSET + 0x118)
-#define RT5350_RX_DRX_IDX1     (RT5350_PDMA_OFFSET + 0x11C)
-#define RT5350_PDMA_GLO_CFG    (RT5350_PDMA_OFFSET + 0x204)
-#define RT5350_PDMA_RST_CFG    (RT5350_PDMA_OFFSET + 0x208)
-#define RT5350_DLY_INT_CFG     (RT5350_PDMA_OFFSET + 0x20c)
-#define RT5350_MTK_INT_STATUS  (RT5350_PDMA_OFFSET + 0x220)
-#define RT5350_MTK_INT_ENABLE  (RT5350_PDMA_OFFSET + 0x228)
-#define RT5350_PDMA_SCH_CFG    (RT5350_PDMA_OFFSET + 0x280)
-
-#define MTK_PDMA_GLO_CFG       (MTK_PDMA_OFFSET + 0x00)
-#define MTK_PDMA_RST_CFG       (MTK_PDMA_OFFSET + 0x04)
-#define MTK_PDMA_SCH_CFG       (MTK_PDMA_OFFSET + 0x08)
-#define MTK_DLY_INT_CFG                (MTK_PDMA_OFFSET + 0x0C)
-#define MTK_TX_BASE_PTR0       (MTK_PDMA_OFFSET + 0x10)
-#define MTK_TX_MAX_CNT0                (MTK_PDMA_OFFSET + 0x14)
-#define MTK_TX_CTX_IDX0                (MTK_PDMA_OFFSET + 0x18)
-#define MTK_TX_DTX_IDX0                (MTK_PDMA_OFFSET + 0x1C)
-#define MTK_TX_BASE_PTR1       (MTK_PDMA_OFFSET + 0x20)
-#define MTK_TX_MAX_CNT1                (MTK_PDMA_OFFSET + 0x24)
-#define MTK_TX_CTX_IDX1                (MTK_PDMA_OFFSET + 0x28)
-#define MTK_TX_DTX_IDX1                (MTK_PDMA_OFFSET + 0x2C)
-#define MTK_RX_BASE_PTR0       (MTK_PDMA_OFFSET + 0x30)
-#define MTK_RX_MAX_CNT0                (MTK_PDMA_OFFSET + 0x34)
-#define MTK_RX_CALC_IDX0       (MTK_PDMA_OFFSET + 0x38)
-#define MTK_RX_DRX_IDX0                (MTK_PDMA_OFFSET + 0x3C)
-#define MTK_TX_BASE_PTR2       (MTK_PDMA_OFFSET + 0x40)
-#define MTK_TX_MAX_CNT2                (MTK_PDMA_OFFSET + 0x44)
-#define MTK_TX_CTX_IDX2                (MTK_PDMA_OFFSET + 0x48)
-#define MTK_TX_DTX_IDX2                (MTK_PDMA_OFFSET + 0x4C)
-#define MTK_TX_BASE_PTR3       (MTK_PDMA_OFFSET + 0x50)
-#define MTK_TX_MAX_CNT3                (MTK_PDMA_OFFSET + 0x54)
-#define MTK_TX_CTX_IDX3                (MTK_PDMA_OFFSET + 0x58)
-#define MTK_TX_DTX_IDX3                (MTK_PDMA_OFFSET + 0x5C)
-#define MTK_RX_BASE_PTR1       (MTK_PDMA_OFFSET + 0x60)
-#define MTK_RX_MAX_CNT1                (MTK_PDMA_OFFSET + 0x64)
-#define MTK_RX_CALC_IDX1       (MTK_PDMA_OFFSET + 0x68)
-#define MTK_RX_DRX_IDX1                (MTK_PDMA_OFFSET + 0x6C)
-
-/* Switch DMA configuration */
-#define RT5350_SDM_CFG         (RT5350_SDM_OFFSET + 0x00)
-#define RT5350_SDM_RRING       (RT5350_SDM_OFFSET + 0x04)
-#define RT5350_SDM_TRING       (RT5350_SDM_OFFSET + 0x08)
-#define RT5350_SDM_MAC_ADRL    (RT5350_SDM_OFFSET + 0x0C)
-#define RT5350_SDM_MAC_ADRH    (RT5350_SDM_OFFSET + 0x10)
-#define RT5350_SDM_TPCNT       (RT5350_SDM_OFFSET + 0x100)
-#define RT5350_SDM_TBCNT       (RT5350_SDM_OFFSET + 0x104)
-#define RT5350_SDM_RPCNT       (RT5350_SDM_OFFSET + 0x108)
-#define RT5350_SDM_RBCNT       (RT5350_SDM_OFFSET + 0x10C)
-#define RT5350_SDM_CS_ERR      (RT5350_SDM_OFFSET + 0x110)
-
-#define RT5350_SDM_ICS_EN      BIT(16)
-#define RT5350_SDM_TCS_EN      BIT(17)
-#define RT5350_SDM_UCS_EN      BIT(18)
-
-/* QDMA registers */
-#define MTK_QTX_CFG(x)         (0x1800 + (x * 0x10))
-#define MTK_QTX_SCH(x)         (0x1804 + (x * 0x10))
-#define MTK_QRX_BASE_PTR0      0x1900
-#define MTK_QRX_MAX_CNT0       0x1904
-#define MTK_QRX_CRX_IDX0       0x1908
-#define MTK_QRX_DRX_IDX0       0x190C
-#define MTK_QDMA_GLO_CFG       0x1A04
-#define MTK_QDMA_RST_IDX       0x1A08
-#define MTK_QDMA_DELAY_INT     0x1A0C
-#define MTK_QDMA_FC_THRES      0x1A10
-#define MTK_QMTK_INT_STATUS    0x1A18
-#define MTK_QMTK_INT_ENABLE    0x1A1C
-#define MTK_QDMA_HRED2         0x1A44
-
-#define MTK_QTX_CTX_PTR                0x1B00
-#define MTK_QTX_DTX_PTR                0x1B04
-
-#define MTK_QTX_CRX_PTR                0x1B10
-#define MTK_QTX_DRX_PTR                0x1B14
-
-#define MTK_QDMA_FQ_HEAD       0x1B20
-#define MTK_QDMA_FQ_TAIL       0x1B24
-#define MTK_QDMA_FQ_CNT                0x1B28
-#define MTK_QDMA_FQ_BLEN       0x1B2C
-
-#define QDMA_PAGE_SIZE         2048
-#define QDMA_TX_OWNER_CPU      BIT(31)
-#define QDMA_TX_SWC            BIT(14)
-#define TX_QDMA_SDL(_x)                (((_x) & 0x3fff) << 16)
-#define QDMA_RES_THRES         4
-
-/* MDIO_CFG register bits */
-#define MTK_MDIO_CFG_AUTO_POLL_EN      BIT(29)
-#define MTK_MDIO_CFG_GP1_BP_EN         BIT(16)
-#define MTK_MDIO_CFG_GP1_FRC_EN                BIT(15)
-#define MTK_MDIO_CFG_GP1_SPEED_10      (0 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_100     (1 << 13)
-#define MTK_MDIO_CFG_GP1_SPEED_1000    (2 << 13)
-#define MTK_MDIO_CFG_GP1_DUPLEX                BIT(12)
-#define MTK_MDIO_CFG_GP1_FC_TX         BIT(11)
-#define MTK_MDIO_CFG_GP1_FC_RX         BIT(10)
-#define MTK_MDIO_CFG_GP1_LNK_DWN       BIT(9)
-#define MTK_MDIO_CFG_GP1_AN_FAIL       BIT(8)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_1     (0 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_2     (1 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_4     (2 << 6)
-#define MTK_MDIO_CFG_MDC_CLK_DIV_8     (3 << 6)
-#define MTK_MDIO_CFG_TURBO_MII_FREQ    BIT(5)
-#define MTK_MDIO_CFG_TURBO_MII_MODE    BIT(4)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_0     (0 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_200   (1 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_400   (2 << 2)
-#define MTK_MDIO_CFG_RX_CLK_SKEW_INV   (3 << 2)
-#define MTK_MDIO_CFG_TX_CLK_SKEW_0     0
-#define MTK_MDIO_CFG_TX_CLK_SKEW_200   1
-#define MTK_MDIO_CFG_TX_CLK_SKEW_400   2
-#define MTK_MDIO_CFG_TX_CLK_SKEW_INV   3
-
-/* uni-cast port */
-#define MTK_GDM1_JMB_LEN_MASK  0xf
-#define MTK_GDM1_JMB_LEN_SHIFT 28
-#define MTK_GDM1_ICS_EN                BIT(22)
-#define MTK_GDM1_TCS_EN                BIT(21)
-#define MTK_GDM1_UCS_EN                BIT(20)
-#define MTK_GDM1_JMB_EN                BIT(19)
-#define MTK_GDM1_STRPCRC       BIT(16)
-#define MTK_GDM1_UFRC_P_CPU    (0 << 12)
-#define MTK_GDM1_UFRC_P_GDMA1  (1 << 12)
-#define MTK_GDM1_UFRC_P_PPE    (6 << 12)
-
-/* checksums */
-#define MTK_ICS_GEN_EN         BIT(2)
-#define MTK_UCS_GEN_EN         BIT(1)
-#define MTK_TCS_GEN_EN         BIT(0)
-
-/* dma mode */
-#define MTK_PDMA               BIT(0)
-#define MTK_QDMA               BIT(1)
-#define MTK_PDMA_RX_QDMA_TX    (MTK_PDMA | MTK_QDMA)
-
-/* dma ring */
-#define MTK_PST_DRX_IDX0       BIT(16)
-#define MTK_PST_DTX_IDX3       BIT(3)
-#define MTK_PST_DTX_IDX2       BIT(2)
-#define MTK_PST_DTX_IDX1       BIT(1)
-#define MTK_PST_DTX_IDX0       BIT(0)
-
-#define MTK_RX_2B_OFFSET       BIT(31)
-#define MTK_TX_WB_DDONE                BIT(6)
-#define MTK_RX_DMA_BUSY                BIT(3)
-#define MTK_TX_DMA_BUSY                BIT(1)
-#define MTK_RX_DMA_EN          BIT(2)
-#define MTK_TX_DMA_EN          BIT(0)
-
-#define MTK_PDMA_SIZE_4DWORDS  (0 << 4)
-#define MTK_PDMA_SIZE_8DWORDS  (1 << 4)
-#define MTK_PDMA_SIZE_16DWORDS (2 << 4)
-
-#define MTK_US_CYC_CNT_MASK    0xff
-#define MTK_US_CYC_CNT_SHIFT   0x8
-#define MTK_US_CYC_CNT_DIVISOR 1000000
-
-/* PDMA descriptor rxd2 */
-#define RX_DMA_DONE            BIT(31)
-#define RX_DMA_LSO             BIT(30)
-#define RX_DMA_PLEN0(_x)       (((_x) & 0x3fff) << 16)
-#define RX_DMA_GET_PLEN0(_x)   (((_x) >> 16) & 0x3fff)
-#define RX_DMA_TAG             BIT(15)
-
-/* PDMA descriptor rxd3 */
-#define RX_DMA_TPID(_x)                (((_x) >> 16) & 0xffff)
-#define RX_DMA_VID(_x)         ((_x) & 0xfff)
-
-/* PDMA descriptor rxd4 */
-#define RX_DMA_L4VALID         BIT(30)
-#define RX_DMA_FPORT_SHIFT     19
-#define RX_DMA_FPORT_MASK      0x7
-
-struct mtk_rx_dma {
-       unsigned int rxd1;
-       unsigned int rxd2;
-       unsigned int rxd3;
-       unsigned int rxd4;
-} __packed __aligned(4);
-
-/* PDMA tx descriptor bits */
-#define TX_DMA_BUF_LEN         0x3fff
-#define TX_DMA_PLEN0_MASK      (TX_DMA_BUF_LEN << 16)
-#define TX_DMA_PLEN0(_x)       (((_x) & TX_DMA_BUF_LEN) << 16)
-#define TX_DMA_PLEN1(_x)       ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN0(_x)    (((_x) >> 16) & TX_DMA_BUF_LEN)
-#define TX_DMA_GET_PLEN1(_x)    ((_x) & TX_DMA_BUF_LEN)
-#define TX_DMA_LS1             BIT(14)
-#define TX_DMA_LS0             BIT(30)
-#define TX_DMA_DONE            BIT(31)
-#define TX_DMA_FPORT_SHIFT     25
-#define TX_DMA_FPORT_MASK      0x7
-#define TX_DMA_INS_VLAN_MT7621 BIT(16)
-#define TX_DMA_INS_VLAN                BIT(7)
-#define TX_DMA_INS_PPPOE       BIT(12)
-#define TX_DMA_TAG             BIT(15)
-#define TX_DMA_TAG_MASK                BIT(15)
-#define TX_DMA_QN(_x)          ((_x) << 16)
-#define TX_DMA_PN(_x)          ((_x) << 24)
-#define TX_DMA_QN_MASK         TX_DMA_QN(0x7)
-#define TX_DMA_PN_MASK         TX_DMA_PN(0x7)
-#define TX_DMA_UDF             BIT(20)
-#define TX_DMA_CHKSUM          (0x7 << 29)
-#define TX_DMA_TSO             BIT(28)
-#define TX_DMA_DESP4_DEF       (TX_DMA_QN(3) | TX_DMA_PN(1))
-
-/* frame engine counters */
-#define MTK_PPE_AC_BCNT0       (MTK_CMTABLE_OFFSET + 0x00)
-#define MTK_GDMA1_TX_GBCNT     (MTK_CMTABLE_OFFSET + 0x300)
-#define MTK_GDMA2_TX_GBCNT     (MTK_GDMA1_TX_GBCNT + 0x40)
-
-/* phy device flags */
-#define MTK_PHY_FLAG_PORT      BIT(0)
-#define MTK_PHY_FLAG_ATTACH    BIT(1)
-
-struct mtk_tx_dma {
-       unsigned int txd1;
-       unsigned int txd2;
-       unsigned int txd3;
-       unsigned int txd4;
-} __packed __aligned(4);
-
-struct mtk_eth;
-struct mtk_mac;
-
-/* manage the attached phys */
-struct mtk_phy {
-       spinlock_t              lock;
-
-       struct phy_device       *phy[8];
-       struct device_node      *phy_node[8];
-       const __be32            *phy_fixed[8];
-       int                     duplex[8];
-       int                     speed[8];
-       int                     tx_fc[8];
-       int                     rx_fc[8];
-       int (*connect)(struct mtk_mac *mac);
-       void (*disconnect)(struct mtk_mac *mac);
-       void (*start)(struct mtk_mac *mac);
-       void (*stop)(struct mtk_mac *mac);
-};
-
-/* struct mtk_soc_data - the structure that holds the SoC specific data
- * @reg_table:         Some of the legacy registers changed their location
- *                     over time. Their offsets are stored in this table
- *
- * @init_data:         Some features depend on the silicon revision. This
- *                     callback allows runtime modification of the content of
- *                     this struct
- * @reset_fe:          This callback is used to trigger the reset of the frame
- *                     engine
- * @set_mac:           This callback is used to set the unicast mac address
- *                     filter
- * @fwd_config:                This callback is used to setup the forward config
- *                     register of the MAC
- * @switch_init:       This callback is used to bring up the switch core
- * @port_init:         Some SoCs have ports that can be routed to a switch port
- *                     or an external PHY. This callback is used to setup these
- *                     ports.
- * @has_carrier:       This callback allows the driver to check if there is a cable
- *                     attached.
- * @mdio_init:         This callback is used to setup the MDIO bus if one is
- *                     present
- * @mdio_cleanup:      This callback is used to cleanup the MDIO state.
- * @mdio_write:                This callback is used to write data to the MDIO bus.
- * @mdio_read:         This callback is used to read data from the MDIO bus.
- * @mdio_adjust_link:  This callback is used to apply the PHY settings.
- * @piac_offset:       the PIAC register has a different base offset
- * @hw_features:       feature set depends on the SoC type
- * @dma_ring_size:     allow GBit SoCs to set bigger rings than FE SoCs
- * @napi_weight:       allow GBit SoCs to set bigger napi weight than FE SoCs
- * @dma_type:          whether the SoC uses PDMA, QDMA or a mix of the two
- * @pdma_glo_cfg:      the default DMA configuration
- * @rx_int:            the RX interrupt bits used by the SoC
- * @tx_int:            the TX interrupt bits used by the SoC
- * @status_int:                the Status interrupt bits used by the SoC
- * @checksum_bit:      the bits used to turn on HW checksumming
- * @txd4:              default value of the TXD4 descriptor
- * @mac_count:         the number of MACs that the SoC has
- * @new_stats:         there is an old and a new way to read hardware stats
- *                     registers
- * @jumbo_frame:       does the SoC support jumbo frames?
- * @rx_2b_offset:      tell the rx dma to offset the data by 2 bytes
- * @rx_sg_dma:         scatter gather support
- * @padding_64b:       enable 64 bit padding
- * @padding_bug:       rt2880 has a padding bug
- * @has_switch:                does the SoC have a built-in switch
- *
- * Although all of the supported SoCs share the same basic functionality, there
- * are several SoC specific functions and features that we need to support. This
- * struct holds the SoC specific data so that the common core can figure out
- * how to setup and use these differences.
- */
-struct mtk_soc_data {
-       const u16 *reg_table;
-
-       void (*init_data)(struct mtk_soc_data *data, struct net_device *netdev);
-       void (*reset_fe)(struct mtk_eth *eth);
-       void (*set_mac)(struct mtk_mac *mac, unsigned char *macaddr);
-       int (*fwd_config)(struct mtk_eth *eth);
-       int (*switch_init)(struct mtk_eth *eth);
-       void (*port_init)(struct mtk_eth *eth, struct mtk_mac *mac,
-                         struct device_node *port);
-       int (*has_carrier)(struct mtk_eth *eth);
-       int (*mdio_init)(struct mtk_eth *eth);
-       void (*mdio_cleanup)(struct mtk_eth *eth);
-       int (*mdio_write)(struct mii_bus *bus, int phy_addr, int phy_reg,
-                         u16 val);
-       int (*mdio_read)(struct mii_bus *bus, int phy_addr, int phy_reg);
-       void (*mdio_adjust_link)(struct mtk_eth *eth, int port);
-       u32 piac_offset;
-       netdev_features_t hw_features;
-       u32 dma_ring_size;
-       u32 napi_weight;
-       u32 dma_type;
-       u32 pdma_glo_cfg;
-       u32 rx_int;
-       u32 tx_int;
-       u32 status_int;
-       u32 checksum_bit;
-       u32 txd4;
-       u32 mac_count;
-
-       u32 new_stats:1;
-       u32 jumbo_frame:1;
-       u32 rx_2b_offset:1;
-       u32 rx_sg_dma:1;
-       u32 padding_64b:1;
-       u32 padding_bug:1;
-       u32 has_switch:1;
-};
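
The kernel-doc above is the contract between the common core and the per-SoC glue code. As a rough sketch only (this is not the driver's actual mtk_init_hw()), a hardware bring-up path would dispatch through these callbacks along the following lines, using only fields declared in the struct:

    static int example_init_hw(struct mtk_eth *eth)
    {
            struct mtk_soc_data *soc = eth->soc;
            int err;

            /* reset the frame engine before programming it */
            soc->reset_fe(eth);

            /* program the MAC forwarding configuration */
            err = soc->fwd_config(eth);
            if (err)
                    return err;

            /* bring up the built-in switch, if this SoC has one */
            if (soc->has_switch && soc->switch_init) {
                    err = soc->switch_init(eth);
                    if (err)
                            return err;
            }

            /* the MDIO bus is optional */
            if (soc->mdio_init)
                    err = soc->mdio_init(eth);

            return err;
    }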
-
-#define MTK_STAT_OFFSET                        0x40
-
-/* struct mtk_hw_stats - the structure that holds the traffic statistics.
- * @stats_lock:                make sure that stats operations are atomic
- * @reg_offset:                the status register offset of the SoC
- * @syncp:             sequence counter for consistent 64 bit stats reads
- *
- * All of the supported SoCs have hardware counters for traffic statistics.
- * Whenever the status IRQ triggers we can read the latest stats from these
- * counters and store them in this struct.
- */
-struct mtk_hw_stats {
-       spinlock_t stats_lock;
-       u32 reg_offset;
-       struct u64_stats_sync syncp;
-
-       u64 tx_bytes;
-       u64 tx_packets;
-       u64 tx_skip;
-       u64 tx_collisions;
-       u64 rx_bytes;
-       u64 rx_packets;
-       u64 rx_overflow;
-       u64 rx_fcs_errors;
-       u64 rx_short_errors;
-       u64 rx_long_errors;
-       u64 rx_checksum_errors;
-       u64 rx_flow_control_packets;
-};
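
The counters above are 64 bit and may be updated from interrupt context, so on 32-bit SoCs readers need @syncp to fetch a consistent snapshot. A small sketch of the usual read side, using the standard u64_stats helpers (the function name is hypothetical):

    static void example_read_stats(struct mtk_hw_stats *hw_stats,
                                   u64 *tx_packets, u64 *tx_bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin(&hw_stats->syncp);
                    *tx_packets = hw_stats->tx_packets;
                    *tx_bytes = hw_stats->tx_bytes;
            } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
    }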
-
-/* PDMA descriptor can point at 1-2 segments. This enum allows us to track how
- * memory was allocated so that it can be freed properly
- */
-enum mtk_tx_flags {
-       MTK_TX_FLAGS_SINGLE0    = 0x01,
-       MTK_TX_FLAGS_PAGE0      = 0x02,
-       MTK_TX_FLAGS_PAGE1      = 0x04,
-};
-
-/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
- *                     by the TX descriptors
- * @skb:               The SKB pointer of the packet being sent
- * @dma_addr0:         The base addr of the first segment
- * @dma_len0:          The length of the first segment
- * @dma_addr1:         The base addr of the second segment
- * @dma_len1:          The length of the second segment
- */
-struct mtk_tx_buf {
-       struct sk_buff *skb;
-       u32 flags;
-       DEFINE_DMA_UNMAP_ADDR(dma_addr0);
-       DEFINE_DMA_UNMAP_LEN(dma_len0);
-       DEFINE_DMA_UNMAP_ADDR(dma_addr1);
-       DEFINE_DMA_UNMAP_LEN(dma_len1);
-};
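
The flags record how dma_addr0/dma_addr1 were mapped so that the reclaim path can undo each mapping with the matching unmap call. A minimal sketch of that reclaim step, assuming the standard dma_unmap_* accessors (the helper name is hypothetical):

    static void example_txbuf_unmap(struct device *dev, struct mtk_tx_buf *buf)
    {
            if (buf->flags & MTK_TX_FLAGS_SINGLE0)
                    dma_unmap_single(dev, dma_unmap_addr(buf, dma_addr0),
                                     dma_unmap_len(buf, dma_len0), DMA_TO_DEVICE);
            else if (buf->flags & MTK_TX_FLAGS_PAGE0)
                    dma_unmap_page(dev, dma_unmap_addr(buf, dma_addr0),
                                   dma_unmap_len(buf, dma_len0), DMA_TO_DEVICE);

            if (buf->flags & MTK_TX_FLAGS_PAGE1)
                    dma_unmap_page(dev, dma_unmap_addr(buf, dma_addr1),
                                   dma_unmap_len(buf, dma_len1), DMA_TO_DEVICE);

            if (buf->skb)
                    dev_kfree_skb_any(buf->skb);
            buf->skb = NULL;
            buf->flags = 0;
    }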
-
-/* struct mtk_tx_ring -        This struct holds info describing a TX ring
- * @tx_dma:            The descriptor ring
- * @tx_buf:            The memory pointed at by the ring
- * @tx_phys:           The physical addr of tx_buf
- * @tx_next_free:      Pointer to the next free descriptor
- * @tx_last_free:      Pointer to the last free descriptor
- * @tx_thresh:         The threshold for the minimum number of free descriptors
- * @tx_map:            Callback to map a new packet into the ring
- * @tx_poll:           Callback for the housekeeping function
- * @tx_clean:          Callback for the cleanup function
- * @tx_ring_size:      How many descriptors are in the ring
- * @tx_free_idx:       The index of the next free descriptor
- * @tx_next_idx:       QDMA uses a linked list. This element points to the next
- *                     free descriptor in the list
- * @tx_free_count:     QDMA uses a linked list. Track how many free descriptors
- *                     are present
- */
-struct mtk_tx_ring {
-       struct mtk_tx_dma *tx_dma;
-       struct mtk_tx_buf *tx_buf;
-       dma_addr_t tx_phys;
-       struct mtk_tx_dma *tx_next_free;
-       struct mtk_tx_dma *tx_last_free;
-       u16 tx_thresh;
-       int (*tx_map)(struct sk_buff *skb, struct net_device *dev, int tx_num,
-                     struct mtk_tx_ring *ring, bool gso);
-       int (*tx_poll)(struct mtk_eth *eth, int budget, bool *tx_again);
-       void (*tx_clean)(struct mtk_eth *eth);
-
-       /* PDMA only */
-       u16 tx_ring_size;
-       u16 tx_free_idx;
-
-       /* QDMA only */
-       u16 tx_next_idx;
-       atomic_t tx_free_count;
-};
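
Because the PDMA and QDMA rings are filled differently, the hot path goes through the @tx_map callback instead of open-coding either variant. A rough, hypothetical illustration of the transmit side (the real xmit path also does free-descriptor accounting and queue stops):

    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
    {
            struct mtk_mac *mac = netdev_priv(dev);
            struct mtk_tx_ring *ring = &mac->hw->tx_ring;

            /* hand the packet to the PDMA- or QDMA-specific mapper */
            if (ring->tx_map(skb, dev, 1 + skb_shinfo(skb)->nr_frags, ring,
                             skb_is_gso(skb)) < 0)
                    return NETDEV_TX_BUSY;

            return NETDEV_TX_OK;
    }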
-
-/* struct mtk_rx_ring -        This struct holds info describing a RX ring
- * @rx_dma:            The descriptor ring
- * @rx_data:           The memory pointed at by the ring
- * @rx_phys:           The physical addr of rx_dma
- * @rx_ring_size:      How many descriptors are in the ring
- * @rx_buf_size:       The size of each packet buffer
- * @rx_calc_idx:       The current head of ring
- */
-struct mtk_rx_ring {
-       struct mtk_rx_dma *rx_dma;
-       u8 **rx_data;
-       dma_addr_t rx_phys;
-       u16 rx_ring_size;
-       u16 frag_size;
-       u16 rx_buf_size;
-       u16 rx_calc_idx;
-};
-
-/* currently no SoC has more than 2 macs */
-#define MTK_MAX_DEVS                   2
-
-/* struct mtk_eth -    This is the main data structure for holding the state
- *                     of the driver
- * @dev:               The device pointer
- * @base:              The mapped register i/o base
- * @page_lock:         Make sure that register operations are atomic
- * @soc:               pointer to our SoC specific data
- * @dummy_dev:         we run 2 netdevs on 1 physical DMA ring and need a
- *                     dummy for NAPI to work
- * @netdev:            The netdev instances
- * @mac:               Each netdev is linked to a physical MAC
- * @switch_np:         The phandle for the switch
- * @irq:               The IRQ that we are using
- * @msg_enable:                Ethtool msg level
- * @sysclk:            The sysclk rate - needed for calibration
- * @ethsys:            The register map pointing at the range used to setup
- *                     MII modes
- * @dma_refcnt:                track how many netdevs are using the DMA engine
- * @tx_ring:           Pointer to the memory holding info about the TX ring
- * @rx_ring:           Pointer to the memory holding info about the RX ring
- * @rx_napi:           The NAPI struct
- * @scratch_ring:      Newer SoCs need memory for a second HW managed TX ring
- * @scratch_head:      The scratch memory that scratch_ring points to.
- * @phy:               Info about the attached PHYs
- * @mii_bus:           If there is a bus we need to create an instance for it
- * @link:              Track if the ports have a physical link
- * @sw_priv:           Pointer to the switch's private data
- * @vlan_map:          RX VID tracking
- */
-
-struct mtk_eth {
-       struct device                   *dev;
-       void __iomem                    *base;
-       spinlock_t                      page_lock;
-       struct mtk_soc_data             *soc;
-       struct net_device               dummy_dev;
-       struct net_device               *netdev[MTK_MAX_DEVS];
-       struct mtk_mac                  *mac[MTK_MAX_DEVS];
-       struct device_node              *switch_np;
-       int                             irq;
-       u32                             msg_enable;
-       unsigned long                   sysclk;
-       struct regmap                   *ethsys;
-       atomic_t                        dma_refcnt;
-       struct mtk_tx_ring              tx_ring;
-       struct mtk_rx_ring              rx_ring[2];
-       struct napi_struct              rx_napi;
-       struct mtk_tx_dma               *scratch_ring;
-       void                            *scratch_head;
-       struct mtk_phy                  *phy;
-       struct mii_bus                  *mii_bus;
-       int                             link[8];
-       void                            *sw_priv;
-       unsigned long                   vlan_map;
-};
-
-/* struct mtk_mac -    the structure that holds the info about the MACs of the
- *                     SoC
- * @id:                        The number of the MAC
- * @of_node:           Our devicetree node
- * @hw:                        Backpointer to our main data structure
- * @hw_stats:          Packet statistics counter
- * @phy_dev:           The attached PHY if available
- * @phy_flags:         The PHY's flags
- * @pending_work:      The work used to reset the DMA ring
- */
-struct mtk_mac {
-       int                             id;
-       struct device_node              *of_node;
-       struct mtk_eth                  *hw;
-       struct mtk_hw_stats             *hw_stats;
-       struct phy_device               *phy_dev;
-       u32                             phy_flags;
-       struct work_struct              pending_work;
-};
-
-/* the structs describing the SoCs. these are declared in the soc_xyz.c files */
-extern const struct of_device_id of_mtk_match[];
-
-/* read the hardware status register */
-void mtk_stats_update_mac(struct mtk_mac *mac);
-
-/* reset the frame engine */
-void mtk_reset(struct mtk_eth *eth, u32 reset_bits);
-
-/* register i/o wrappers */
-void mtk_w32(struct mtk_eth *eth, u32 val, unsigned int reg);
-u32 mtk_r32(struct mtk_eth *eth, unsigned int reg);
-
-/* default clock calibration handler */
-int mtk_set_clock_cycle(struct mtk_eth *eth);
-
-/* default checksum setup handler */
-void mtk_csum_config(struct mtk_eth *eth);
-
-/* default forward config handler */
-void mtk_fwd_config(struct mtk_eth *eth);
-
-#endif /* MTK_ETH_H */
diff --git a/drivers/staging/mt7621-eth/soc_mt7621.c b/drivers/staging/mt7621-eth/soc_mt7621.c
deleted file mode 100644 (file)
index 5d63b5d..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/*   This program is free software; you can redistribute it and/or modify
- *   it under the terms of the GNU General Public License as published by
- *   the Free Software Foundation; version 2 of the License
- *
- *   This program is distributed in the hope that it will be useful,
- *   but WITHOUT ANY WARRANTY; without even the implied warranty of
- *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *   GNU General Public License for more details.
- *
- *   Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
- *   Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
- *   Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/if_vlan.h>
-#include <linux/of_net.h>
-
-#include <asm/mach-ralink/ralink_regs.h>
-
-#include "mtk_eth_soc.h"
-#include "gsw_mt7620.h"
-#include "mdio.h"
-
-#define MT7620_CDMA_CSG_CFG    0x400
-#define MT7621_CDMP_IG_CTRL    (MT7620_CDMA_CSG_CFG + 0x00)
-#define MT7621_CDMP_EG_CTRL    (MT7620_CDMA_CSG_CFG + 0x04)
-#define MT7621_RESET_FE                BIT(6)
-#define MT7621_L4_VALID                BIT(24)
-
-#define MT7621_TX_DMA_UDF      BIT(19)
-
-#define CDMA_ICS_EN            BIT(2)
-#define CDMA_UCS_EN            BIT(1)
-#define CDMA_TCS_EN            BIT(0)
-
-#define GDMA_ICS_EN            BIT(22)
-#define GDMA_TCS_EN            BIT(21)
-#define GDMA_UCS_EN            BIT(20)
-
-/* frame engine counters */
-#define MT7621_REG_MIB_OFFSET  0x2000
-#define MT7621_PPE_AC_BCNT0    (MT7621_REG_MIB_OFFSET + 0x00)
-#define MT7621_GDM1_TX_GBCNT   (MT7621_REG_MIB_OFFSET + 0x400)
-#define MT7621_GDM2_TX_GBCNT   (MT7621_GDM1_TX_GBCNT + 0x40)
-
-#define GSW_REG_GDMA1_MAC_ADRL 0x508
-#define GSW_REG_GDMA1_MAC_ADRH 0x50C
-#define GSW_REG_GDMA2_MAC_ADRL 0x1508
-#define GSW_REG_GDMA2_MAC_ADRH 0x150C
-
-#define MT7621_MTK_RST_GL      0x04
-#define MT7620_MTK_INT_STATUS2 0x08
-
-/* MTK_INT_STATUS reg on mt7620 defines CNT_GDM1_AF at BIT(29),
- * but after testing it should be BIT(13).
- */
-#define MT7621_MTK_GDM1_AF     BIT(28)
-#define MT7621_MTK_GDM2_AF     BIT(29)
-
-static const u16 mt7621_reg_table[MTK_REG_COUNT] = {
-       [MTK_REG_PDMA_GLO_CFG] = RT5350_PDMA_GLO_CFG,
-       [MTK_REG_PDMA_RST_CFG] = RT5350_PDMA_RST_CFG,
-       [MTK_REG_DLY_INT_CFG] = RT5350_DLY_INT_CFG,
-       [MTK_REG_TX_BASE_PTR0] = RT5350_TX_BASE_PTR0,
-       [MTK_REG_TX_MAX_CNT0] = RT5350_TX_MAX_CNT0,
-       [MTK_REG_TX_CTX_IDX0] = RT5350_TX_CTX_IDX0,
-       [MTK_REG_TX_DTX_IDX0] = RT5350_TX_DTX_IDX0,
-       [MTK_REG_RX_BASE_PTR0] = RT5350_RX_BASE_PTR0,
-       [MTK_REG_RX_MAX_CNT0] = RT5350_RX_MAX_CNT0,
-       [MTK_REG_RX_CALC_IDX0] = RT5350_RX_CALC_IDX0,
-       [MTK_REG_RX_DRX_IDX0] = RT5350_RX_DRX_IDX0,
-       [MTK_REG_MTK_INT_ENABLE] = RT5350_MTK_INT_ENABLE,
-       [MTK_REG_MTK_INT_STATUS] = RT5350_MTK_INT_STATUS,
-       [MTK_REG_MTK_DMA_VID_BASE] = 0,
-       [MTK_REG_MTK_COUNTER_BASE] = MT7621_GDM1_TX_GBCNT,
-       [MTK_REG_MTK_RST_GL] = MT7621_MTK_RST_GL,
-       [MTK_REG_MTK_INT_STATUS2] = MT7620_MTK_INT_STATUS2,
-};
-
-static void mt7621_mtk_reset(struct mtk_eth *eth)
-{
-       mtk_reset(eth, MT7621_RESET_FE);
-}
-
-static int mt7621_fwd_config(struct mtk_eth *eth)
-{
-       /* Setup GMAC1 only, there is no support for GMAC2 yet */
-       mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) & ~0xffff,
-               MT7620_GDMA1_FWD_CFG);
-
-       /* Enable RX checksum */
-       mtk_w32(eth, mtk_r32(eth, MT7620_GDMA1_FWD_CFG) | (GDMA_ICS_EN |
-                      GDMA_TCS_EN | GDMA_UCS_EN),
-                      MT7620_GDMA1_FWD_CFG);
-
-       /* Enable RX VLAN offloading */
-       mtk_w32(eth, 0, MT7621_CDMP_EG_CTRL);
-
-       return 0;
-}
-
-static void mt7621_set_mac(struct mtk_mac *mac, unsigned char *hwaddr)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&mac->hw->page_lock, flags);
-       if (mac->id == 0) {
-               mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-                       GSW_REG_GDMA1_MAC_ADRH);
-               mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-                       (hwaddr[4] << 8) | hwaddr[5],
-                       GSW_REG_GDMA1_MAC_ADRL);
-       }
-       if (mac->id == 1) {
-               mtk_w32(mac->hw, (hwaddr[0] << 8) | hwaddr[1],
-                       GSW_REG_GDMA2_MAC_ADRH);
-               mtk_w32(mac->hw, (hwaddr[2] << 24) | (hwaddr[3] << 16) |
-                       (hwaddr[4] << 8) | hwaddr[5],
-                       GSW_REG_GDMA2_MAC_ADRL);
-       }
-       spin_unlock_irqrestore(&mac->hw->page_lock, flags);
-}
-
-static struct mtk_soc_data mt7621_data = {
-       .hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
-                      NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
-                      NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
-                      NETIF_F_IPV6_CSUM,
-       .dma_type = MTK_PDMA,
-       .dma_ring_size = 256,
-       .napi_weight = 64,
-       .new_stats = 1,
-       .padding_64b = 1,
-       .rx_2b_offset = 1,
-       .rx_sg_dma = 1,
-       .has_switch = 1,
-       .mac_count = 2,
-       .reset_fe = mt7621_mtk_reset,
-       .set_mac = mt7621_set_mac,
-       .fwd_config = mt7621_fwd_config,
-       .switch_init = mtk_gsw_init,
-       .reg_table = mt7621_reg_table,
-       .pdma_glo_cfg = MTK_PDMA_SIZE_16DWORDS,
-       .rx_int = RT5350_RX_DONE_INT,
-       .tx_int = RT5350_TX_DONE_INT,
-       .status_int = MT7621_MTK_GDM1_AF | MT7621_MTK_GDM2_AF,
-       .checksum_bit = MT7621_L4_VALID,
-       .has_carrier = mt7620_has_carrier,
-       .mdio_read = mt7620_mdio_read,
-       .mdio_write = mt7620_mdio_write,
-       .mdio_adjust_link = mt7620_mdio_link_adjust,
-};
-
-const struct of_device_id of_mtk_match[] = {
-       { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
-       {},
-};
-
-MODULE_DEVICE_TABLE(of, of_mtk_match);
index d33533872a16f1c4e4e3d7207d6cc04cd6a63379..c8fa17cfa807a24f6450034c714ebc94b59507d4 100644 (file)
@@ -1,6 +1,7 @@
 config PCI_MT7621
        tristate "MediaTek MT7621 PCI Controller"
        depends on RALINK
+       depends on PCI
        select PCI_DRIVERS_GENERIC
        help
          This selects a driver for the MediaTek MT7621 PCI Controller.
index d6248eecf123bdc5ad123ac1fd7b5821b9a22e24..2aee64fdaec555abf8734aef11c7d269dca86150 100644 (file)
@@ -163,7 +163,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev)
                goto no_phy;
 
        phydev = of_phy_connect(dev, phy_node, cvm_oct_adjust_link, 0,
-                               PHY_INTERFACE_MODE_GMII);
+                               priv->phy_mode);
        of_node_put(phy_node);
 
        if (!phydev)
index ce61c5670ef645c78e080ab588b0d75c7591378c..986db76705ccc6b5d384f2db08f715739936d942 100644 (file)
@@ -653,14 +653,37 @@ static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
        return np;
 }
 
-static void cvm_set_rgmii_delay(struct device_node *np, int iface, int port)
+static void cvm_set_rgmii_delay(struct octeon_ethernet *priv, int iface,
+                               int port)
 {
+       struct device_node *np = priv->of_node;
        u32 delay_value;
+       bool rx_delay;
+       bool tx_delay;
 
-       if (!of_property_read_u32(np, "rx-delay", &delay_value))
+       /* By default, both RX and TX delays are enabled in
+        * __cvmx_helper_rgmii_enable().
+        */
+       rx_delay = true;
+       tx_delay = true;
+
+       if (!of_property_read_u32(np, "rx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, iface), delay_value);
-       if (!of_property_read_u32(np, "tx-delay", &delay_value))
+               rx_delay = delay_value > 0;
+       }
+       if (!of_property_read_u32(np, "tx-delay", &delay_value)) {
                cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, iface), delay_value);
+               tx_delay = delay_value > 0;
+       }
+
+       if (!rx_delay && !tx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
+       else if (!rx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+       else if (!tx_delay)
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII_TXID;
+       else
+               priv->phy_mode = PHY_INTERFACE_MODE_RGMII;
 }
 
 static int cvm_oct_probe(struct platform_device *pdev)
@@ -825,6 +848,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                        priv->port = port;
                        priv->queue = cvmx_pko_get_base_queue(priv->port);
                        priv->fau = fau - cvmx_pko_get_num_queues(port) * 4;
+                       priv->phy_mode = PHY_INTERFACE_MODE_NA;
                        for (qos = 0; qos < 16; qos++)
                                skb_queue_head_init(&priv->tx_free_list[qos]);
                        for (qos = 0; qos < cvmx_pko_get_num_queues(port);
@@ -856,6 +880,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
                                break;
 
                        case CVMX_HELPER_INTERFACE_MODE_SGMII:
+                               priv->phy_mode = PHY_INTERFACE_MODE_SGMII;
                                dev->netdev_ops = &cvm_oct_sgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
                                break;
@@ -865,11 +890,16 @@ static int cvm_oct_probe(struct platform_device *pdev)
                                strcpy(dev->name, "spi%d");
                                break;
 
-                       case CVMX_HELPER_INTERFACE_MODE_RGMII:
                        case CVMX_HELPER_INTERFACE_MODE_GMII:
+                               priv->phy_mode = PHY_INTERFACE_MODE_GMII;
+                               dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
+                               strcpy(dev->name, "eth%d");
+                               break;
+
+                       case CVMX_HELPER_INTERFACE_MODE_RGMII:
                                dev->netdev_ops = &cvm_oct_rgmii_netdev_ops;
                                strcpy(dev->name, "eth%d");
-                               cvm_set_rgmii_delay(priv->of_node, interface,
+                               cvm_set_rgmii_delay(priv, interface,
                                                    port_index);
                                break;
                        }
index 4a07e7f43d128cb6903891ad6fcbd399b61e0e61..be570d33685add6873f62740f7c333b8920096aa 100644 (file)
@@ -12,7 +12,7 @@
 #define OCTEON_ETHERNET_H
 
 #include <linux/of.h>
-
+#include <linux/phy.h>
 #include <asm/octeon/cvmx-helper-board.h>
 
 /**
@@ -33,6 +33,8 @@ struct octeon_ethernet {
         * cvmx_helper_interface_mode_t
         */
        int imode;
+       /* PHY mode */
+       phy_interface_t phy_mode;
        /* List of outstanding tx buffers per queue */
        struct sk_buff_head tx_free_list[16];
        unsigned int last_speed;
index 80b8d4153414a80d555d1dfe87fb48e77e684e10..a54286498a477fd2b935683f695d6e57ba8382d5 100644 (file)
@@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
 {
        unsigned char lob;
        int ret, i;
-       struct dcon_gpio *pin = &gpios_asis[0];
+       const struct dcon_gpio *pin = &gpios_asis[0];
 
        for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
                gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
index 1723a47a96b4092fb16df938f9ec1699c220f0f0..952f2ab5134783db8e8978fd79282045a9f65db6 100644 (file)
@@ -174,7 +174,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
 
        pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
 
-       rtw_alloc_hwxmits(padapter);
+       res = rtw_alloc_hwxmits(padapter);
+       if (res == _FAIL)
+               goto exit;
        rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
        for (i = 0; i < 4; i++)
@@ -1503,7 +1505,7 @@ exit:
        return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
        struct hw_xmit *hwxmits;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -1512,6 +1514,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        pxmitpriv->hwxmits = kcalloc(pxmitpriv->hwxmit_entry,
                                     sizeof(struct hw_xmit), GFP_KERNEL);
+       if (!pxmitpriv->hwxmits)
+               return _FAIL;
 
        hwxmits = pxmitpriv->hwxmits;
 
@@ -1519,6 +1523,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
        hwxmits[1] .sta_queue = &pxmitpriv->vi_pending;
        hwxmits[2] .sta_queue = &pxmitpriv->be_pending;
        hwxmits[3] .sta_queue = &pxmitpriv->bk_pending;
+       return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
index 788f59c74ea1e45fb7e598a8f83361b586647969..ba7e15fbde72d60ab9f5589756f024642359a82f 100644 (file)
@@ -336,7 +336,7 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
 void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
 
index 1920d02f7c9f3724cb1516f857875f1bb8a76cfd..8c36acedf50769312ab170c983c192824fcad66a 100644 (file)
@@ -147,17 +147,9 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
 
 static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
 {
-       u32 val;
-       void (*pcmd_callback)(struct _adapter *dev, struct cmd_obj      *pcmd);
        struct cmd_obj *pcmd  = (struct cmd_obj *)pbuf;
 
-       if (pcmd->rsp && pcmd->rspsz > 0)
-               memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
-       pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
-       if (!pcmd_callback)
-               r8712_free_cmd_obj(pcmd);
-       else
-               pcmd_callback(padapter, pcmd);
+       r8712_free_cmd_obj(pcmd);
        return H2C_SUCCESS;
 }
 
index 92fb77666d4462d411d927d26a7bd58d1503c8ca..1ef86b8c592f1490c41bb5436fb45d4e8e065ed5 100644 (file)
@@ -140,7 +140,7 @@ enum rtl8712_h2c_cmd {
 static struct _cmd_callback    cmd_callback[] = {
        {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
        {GEN_CMD_CODE(_Write_MACREG), NULL},
-       {GEN_CMD_CODE(_Read_BBREG), &r8712_getbbrfreg_cmdrsp_callback},
+       {GEN_CMD_CODE(_Read_BBREG), NULL},
        {GEN_CMD_CODE(_Write_BBREG), NULL},
        {GEN_CMD_CODE(_Read_RFREG), &r8712_getbbrfreg_cmdrsp_callback},
        {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
index 094d61bcb46983226a2cbf8974c5dd2e78758e05..b87f13a0b5639acbbb9d88265a6e810ddfb80924 100644 (file)
@@ -260,7 +260,9 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
                }
        }
 
-       rtw_alloc_hwxmits(padapter);
+       res = rtw_alloc_hwxmits(padapter);
+       if (res == _FAIL)
+               goto exit;
        rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
 
        for (i = 0; i < 4; i++) {
@@ -2144,7 +2146,7 @@ exit:
        return res;
 }
 
-void rtw_alloc_hwxmits(struct adapter *padapter)
+s32 rtw_alloc_hwxmits(struct adapter *padapter)
 {
        struct hw_xmit *hwxmits;
        struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -2155,10 +2157,8 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        pxmitpriv->hwxmits = rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
 
-       if (pxmitpriv->hwxmits == NULL) {
-               DBG_871X("alloc hwxmits fail!...\n");
-               return;
-       }
+       if (!pxmitpriv->hwxmits)
+               return _FAIL;
 
        hwxmits = pxmitpriv->hwxmits;
 
@@ -2204,7 +2204,7 @@ void rtw_alloc_hwxmits(struct adapter *padapter)
 
        }
 
-
+       return _SUCCESS;
 }
 
 void rtw_free_hwxmits(struct adapter *padapter)
index 1b38b9182b3165bdfacf97fcc2fa43c34b276468..37f42b2f22f1dcf173b2deeadc904df79f216036 100644 (file)
@@ -487,7 +487,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
 void _rtw_free_xmit_priv (struct xmit_priv *pxmitpriv);
 
 
-void rtw_alloc_hwxmits(struct adapter *padapter);
+s32 rtw_alloc_hwxmits(struct adapter *padapter);
 void rtw_free_hwxmits(struct adapter *padapter);
 
 
index 9930ed954abb2d8aa437a1784fbd3e3f83a043ca..4cc77b2016e1e45f14834820b429f7e8cda19b42 100644 (file)
@@ -180,6 +180,8 @@ static int rtl_phydm_init_priv(struct rtl_priv *rtlpriv,
 
        rtlpriv->phydm.internal =
                kzalloc(sizeof(struct phy_dm_struct), GFP_KERNEL);
+       if (!rtlpriv->phydm.internal)
+               return 0;
 
        _rtl_phydm_init_com_info(rtlpriv, ic, params);
 
index f061dd1382aa102e53ac532844c8e2c9959ae568..cf6b7a80b753b35dc2d488e589f97f9d1899fedb 100644 (file)
@@ -743,6 +743,8 @@ void rtl8822be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
                      u1_rsvd_page_loc, 3);
 
        skb = dev_alloc_skb(totalpacketlen);
+       if (!skb)
+               return;
        memcpy((u8 *)skb_put(skb, totalpacketlen), &reserved_page_packet,
               totalpacketlen);
 
index edff6ce8565558f0671a4fb13119488a6dd254c4..9d85a3a1af4c5eadef475ffd20e39e66612955dc 100644 (file)
@@ -210,12 +210,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
                return -EINVAL;
 
        spin_lock_irqsave(&speakup_info.spinlock, flags);
+       synth_soft.alive = 1;
        while (1) {
                prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
-               if (!unicode)
-                       synth_buffer_skip_nonlatin1();
-               if (!synth_buffer_empty() || speakup_info.flushing)
-                       break;
+               if (synth_current() == &synth_soft) {
+                       if (!unicode)
+                               synth_buffer_skip_nonlatin1();
+                       if (!synth_buffer_empty() || speakup_info.flushing)
+                               break;
+               }
                spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                if (fp->f_flags & O_NONBLOCK) {
                        finish_wait(&speakup_event, &wait);
@@ -235,6 +238,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
 
        /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
        while (chars_sent <= count - bytes_per_ch) {
+               if (synth_current() != &synth_soft)
+                       break;
                if (speakup_info.flushing) {
                        speakup_info.flushing = 0;
                        ch = '\x18';
@@ -331,7 +336,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
        poll_wait(fp, &speakup_event, wait);
 
        spin_lock_irqsave(&speakup_info.spinlock, flags);
-       if (!synth_buffer_empty() || speakup_info.flushing)
+       if (synth_current() == &synth_soft &&
+           (!synth_buffer_empty() || speakup_info.flushing))
                ret = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irqrestore(&speakup_info.spinlock, flags);
        return ret;
index c8e688878fc705a47d88cfa1f4f73e2dceaefdc2..ac6a74883af4753d33906e62c366b3d4f8a1c677 100644 (file)
@@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
 int synth_release_region(unsigned long start, unsigned long n);
 int synth_add(struct spk_synth *in_synth);
 void synth_remove(struct spk_synth *in_synth);
+struct spk_synth *synth_current(void);
 
 extern struct speakup_info_t speakup_info;
 
index 25f259ee4ffc74990e5a19c8560840ca9a59058e..3568bfb89912c3316d649b6c19223f4206936457 100644 (file)
@@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
 }
 EXPORT_SYMBOL_GPL(synth_remove);
 
+struct spk_synth *synth_current(void)
+{
+       return synth;
+}
+EXPORT_SYMBOL_GPL(synth_current);
+
 short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
index 804daf83be35172ecda66b35a9bbfd7e6e4fc5c2..064d0db4c51ef14af59f908a32df3c94fa768714 100644 (file)
@@ -3513,6 +3513,7 @@ static int vchiq_probe(struct platform_device *pdev)
        struct device_node *fw_node;
        const struct of_device_id *of_id;
        struct vchiq_drvdata *drvdata;
+       struct device *vchiq_dev;
        int err;
 
        of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
@@ -3547,9 +3548,12 @@ static int vchiq_probe(struct platform_device *pdev)
                goto failed_platform_init;
        }
 
-       if (IS_ERR(device_create(vchiq_class, &pdev->dev, vchiq_devid,
-                                NULL, "vchiq")))
+       vchiq_dev = device_create(vchiq_class, &pdev->dev, vchiq_devid, NULL,
+                                 "vchiq");
+       if (IS_ERR(vchiq_dev)) {
+               err = PTR_ERR(vchiq_dev);
                goto failed_device_create;
+       }
 
        vchiq_debugfs_init();
 
index b370985b58a101f65e561c4cdbc43d51a26d1e19..c6bb4aaf9bd02fc18b6ca9e1bbc7a37e5724c805 100644 (file)
@@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
                return;
        }
 
-       MACvIntDisable(priv->PortOffset);
-
        spin_lock_irqsave(&priv->lock, flags);
 
        /* Read low level stats */
@@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
        }
 
        spin_unlock_irqrestore(&priv->lock, flags);
-
-       MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static void vnt_interrupt_work(struct work_struct *work)
@@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
 
        if (priv->vif)
                vnt_interrupt_process(priv);
+
+       MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
 }
 
 static irqreturn_t vnt_interrupt(int irq,  void *arg)
 {
        struct vnt_private *priv = arg;
 
-       if (priv->vif)
-               schedule_work(&priv->interrupt_work);
+       schedule_work(&priv->interrupt_work);
+
+       MACvIntDisable(priv->PortOffset);
 
        return IRQ_HANDLED;
 }
index db5df3d548188b6c440db8928ac157b5512174bb..3bdd56a1021b26d6e74ff98ad6f0e25f25f5de08 100644 (file)
@@ -49,11 +49,6 @@ struct ar933x_uart_port {
        struct clk              *clk;
 };
 
-static inline bool ar933x_uart_console_enabled(void)
-{
-       return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
-}
-
 static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
                                            int offset)
 {
@@ -508,6 +503,7 @@ static const struct uart_ops ar933x_uart_ops = {
        .verify_port    = ar933x_uart_verify_port,
 };
 
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
 static struct ar933x_uart_port *
 ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
 
@@ -604,14 +600,7 @@ static struct console ar933x_uart_console = {
        .index          = -1,
        .data           = &ar933x_uart_driver,
 };
-
-static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
-{
-       if (!ar933x_uart_console_enabled())
-               return;
-
-       ar933x_console_ports[up->port.line] = up;
-}
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
 
 static struct uart_driver ar933x_uart_driver = {
        .owner          = THIS_MODULE,
@@ -700,7 +689,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
        baud = ar933x_uart_get_baud(port->uartclk, 0, AR933X_UART_MAX_STEP);
        up->max_baud = min_t(unsigned int, baud, AR933X_UART_MAX_BAUD);
 
-       ar933x_uart_add_console_port(up);
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+       ar933x_console_ports[up->port.line] = up;
+#endif
 
        ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
        if (ret)
@@ -749,8 +740,9 @@ static int __init ar933x_uart_init(void)
 {
        int ret;
 
-       if (ar933x_uart_console_enabled())
-               ar933x_uart_driver.cons = &ar933x_uart_console;
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+       ar933x_uart_driver.cons = &ar933x_uart_console;
+#endif
 
        ret = uart_register_driver(&ar933x_uart_driver);
        if (ret)
index 05147fe243434a52e4ca827227d95dc1f6a0f2e6..0b4f3690532145da4228b8b8255f0d5da928a31f 100644 (file)
@@ -166,6 +166,8 @@ struct atmel_uart_port {
        unsigned int            pending_status;
        spinlock_t              lock_suspended;
 
+       bool                    hd_start_rx;    /* can start RX during half-duplex operation */
+
        /* ISO7816 */
        unsigned int            fidi_min;
        unsigned int            fidi_max;
@@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
        __raw_writeb(value, port->membase + ATMEL_US_THR);
 }
 
+static inline int atmel_uart_is_half_duplex(struct uart_port *port)
+{
+       return ((port->rs485.flags & SER_RS485_ENABLED) &&
+               !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
+               (port->iso7816.flags & SER_ISO7816_ENABLED);
+}
+
 #ifdef CONFIG_SERIAL_ATMEL_PDC
 static bool atmel_use_pdc_rx(struct uart_port *port)
 {
@@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
        /* Disable interrupts */
        atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
 
-       if (((port->rs485.flags & SER_RS485_ENABLED) &&
-            !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-           port->iso7816.flags & SER_ISO7816_ENABLED)
+       if (atmel_uart_is_half_duplex(port))
                atmel_start_rx(port);
+
 }
 
 /*
@@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
                return;
 
        if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
-               if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                   port->iso7816.flags & SER_ISO7816_ENABLED)
+               if (atmel_uart_is_half_duplex(port))
                        atmel_stop_rx(port);
 
        if (atmel_use_pdc_tx(port))
@@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
         */
        if (!uart_circ_empty(xmit))
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
-       else if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                 !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                port->iso7816.flags & SER_ISO7816_ENABLED) {
-               /* DMA done, stop TX, start RX for RS485 */
-               atmel_start_rx(port);
+       else if (atmel_uart_is_half_duplex(port)) {
+               /*
+                * DMA done, re-enable TXEMPTY and signal that we can stop
+                * TX and start RX for RS485
+                */
+               atmel_port->hd_start_rx = true;
+               atmel_uart_writel(port, ATMEL_US_IER,
+                                 atmel_port->tx_done_mask);
        }
 
        spin_unlock_irqrestore(&port->lock, flags);
@@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
                                         sg_dma_len(&atmel_port->sg_rx)/2,
                                         DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT);
+       if (!desc) {
+               dev_err(port->dev, "Preparing DMA cyclic failed\n");
+               goto chan_err;
+       }
        desc->callback = atmel_complete_rx_dma;
        desc->callback_param = port;
        atmel_port->desc_rx = desc;
@@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
        struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
 
        if (pending & atmel_port->tx_done_mask) {
-               /* Either PDC or interrupt transmission */
                atmel_uart_writel(port, ATMEL_US_IDR,
                                  atmel_port->tx_done_mask);
+
+               /* Start RX if flag was set and FIFO is empty */
+               if (atmel_port->hd_start_rx) {
+                       if (!(atmel_uart_readl(port, ATMEL_US_CSR)
+                                       & ATMEL_US_TXEMPTY))
+                               dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
+
+                       atmel_port->hd_start_rx = false;
+                       atmel_start_rx(port);
+                       return;
+               }
+
                atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
        }
 }
@@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
                atmel_uart_writel(port, ATMEL_US_IER,
                                  atmel_port->tx_done_mask);
        } else {
-               if (((port->rs485.flags & SER_RS485_ENABLED) &&
-                    !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
-                   port->iso7816.flags & SER_ISO7816_ENABLED) {
+               if (atmel_uart_is_half_duplex(port)) {
                        /* DMA done, stop TX, start RX for RS485 */
                        atmel_start_rx(port);
                }
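
The atmel_serial hunks above fold the repeated RS485/ISO7816 test into atmel_uart_is_half_duplex() and, on DMA completion, defer the TX-to-RX turnaround until the TXEMPTY interrupt reports a drained transmitter. A rough, self-contained sketch of that two-step hand-off, with every name invented for illustration:

#include <stdbool.h>
#include <stdio.h>

struct hd_port {
	bool rs485_enabled;
	bool rx_during_tx;
	bool iso7816_enabled;
	bool hd_start_rx;	/* DMA finished, but the FIFO may not be empty yet */
};

/* Half-duplex means RX has to stay off while TX is still in progress. */
static bool is_half_duplex(const struct hd_port *p)
{
	return (p->rs485_enabled && !p->rx_during_tx) || p->iso7816_enabled;
}

/* DMA completion: do not start RX yet, wait for the transmitter to drain. */
static void tx_dma_complete(struct hd_port *p)
{
	if (is_half_duplex(p))
		p->hd_start_rx = true;	/* the TXEMPTY interrupt finishes the job */
}

/* TXEMPTY interrupt: the FIFO has drained, so it is now safe to enable RX. */
static void tx_empty_irq(struct hd_port *p)
{
	if (p->hd_start_rx) {
		p->hd_start_rx = false;
		printf("start RX\n");
	}
}

int main(void)
{
	struct hd_port p = { .rs485_enabled = true };

	tx_dma_complete(&p);	/* marks hd_start_rx */
	tx_empty_irq(&p);	/* prints "start RX" */
	return 0;
}
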
index 6fb312e7af713ecd3efcc4c0ef069602635f7681..bfe5e9e034ecf86b3de80476eb90a44a0d228359 100644 (file)
@@ -148,8 +148,10 @@ static int configure_kgdboc(void)
        char *cptr = config;
        struct console *cons;
 
-       if (!strlen(config) || isspace(config[0]))
+       if (!strlen(config) || isspace(config[0])) {
+               err = 0;
                goto noconfig;
+       }
 
        kgdboc_io_ops.is_console = 0;
        kgdb_tty_driver = NULL;
index f5bdde40562750c7695823e93bab799bf5593ad5..450ba6d7996c229e7e3a796439f8e6caa1dbaebb 100644 (file)
@@ -1415,6 +1415,8 @@ static int max310x_spi_probe(struct spi_device *spi)
        if (spi->dev.of_node) {
                const struct of_device_id *of_id =
                        of_match_device(max310x_dt_ids, &spi->dev);
+               if (!of_id)
+                       return -ENODEV;
 
                devtype = (struct max310x_devtype *)of_id->data;
        } else {
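
This max310x change (and the mvebu-uart change that follows) checks the result of the OF match lookup before dereferencing it, since of_match_device() can return NULL. A minimal probe-style sketch of the pattern, assuming a hypothetical device-type table:

#include <linux/of_device.h>
#include <linux/spi/spi.h>

/* Hypothetical per-compatible data and match table. */
struct example_devtype { int nr_uarts; };

static const struct example_devtype example_devtype = { .nr_uarts = 1 };

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "vendor,example-uart", .data = &example_devtype },
	{ }
};

static int example_probe(struct spi_device *spi)
{
	const struct example_devtype *devtype;

	if (spi->dev.of_node) {
		const struct of_device_id *of_id =
			of_match_device(example_dt_ids, &spi->dev);

		/* Can be NULL, e.g. when probed through a non-OF path. */
		if (!of_id)
			return -ENODEV;

		devtype = of_id->data;
	} else {
		devtype = &example_devtype;
	}

	return devtype->nr_uarts > 0 ? 0 : -EINVAL;
}
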
index 231f751d1ef48b42e4a9820c73408242862035e9..7e7b1559fa3695406ae80edda49f0f1f7634dc9a 100644 (file)
@@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
+       if (!match)
+               return -ENODEV;
+
        /* Assume that all UART ports have a DT alias or none has */
        id = of_alias_get_id(pdev->dev.of_node, "serial");
        if (!pdev->dev.of_node || id < 0)
index 27235a526cce8c4b59aa14f6764e466b10988748..4c188f4079b3ea68ee51982b41d4b14ba27567c4 100644 (file)
@@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
 
        s->port.mapbase = r->start;
        s->port.membase = ioremap(r->start, resource_size(r));
+       if (!s->port.membase) {
+               ret = -ENOMEM;
+               goto out_disable_clks;
+       }
        s->port.ops = &mxs_auart_ops;
        s->port.iotype = UPIO_MEM;
        s->port.fifosize = MXS_AUART_FIFO_SIZE;
index 3bcec1c20219102b277aafc72b548425df33e175..35e5f9c5d5bed48274363343366c8bd76395c500 100644 (file)
@@ -1050,7 +1050,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
 {
        struct uart_port *uport;
        struct qcom_geni_serial_port *port;
-       int baud;
+       int baud = 9600;
        int bits = 8;
        int parity = 'n';
        int flow = 'n';
index 635178cf3eed538aa35bf49225a4886097e2e7b0..09a183dfc52640027bf571184ee4e69e819c5951 100644 (file)
@@ -1507,7 +1507,7 @@ static int __init sc16is7xx_init(void)
        ret = i2c_add_driver(&sc16is7xx_i2c_uart_driver);
        if (ret < 0) {
                pr_err("failed to init sc16is7xx i2c --> %d\n", ret);
-               return ret;
+               goto err_i2c;
        }
 #endif
 
@@ -1515,10 +1515,18 @@ static int __init sc16is7xx_init(void)
        ret = spi_register_driver(&sc16is7xx_spi_uart_driver);
        if (ret < 0) {
                pr_err("failed to init sc16is7xx spi --> %d\n", ret);
-               return ret;
+               goto err_spi;
        }
 #endif
        return ret;
+
+err_spi:
+#ifdef CONFIG_SERIAL_SC16IS7XX_I2C
+       i2c_del_driver(&sc16is7xx_i2c_uart_driver);
+#endif
+err_i2c:
+       uart_unregister_driver(&sc16is7xx_uart);
+       return ret;
 }
 module_init(sc16is7xx_init);
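
The sc16is7xx fix converts the early returns into a goto-unwind chain, so a failed SPI registration also tears down the I2C driver and the UART driver registered before it. A condensed, runnable sketch of the same unwind pattern with placeholder register/unregister helpers:

#include <stdio.h>

/* Placeholder steps standing in for uart/i2c/spi registration. */
static int register_a(void) { return 0; }
static int register_b(void) { return 0; }
static int register_c(void) { return -1; }	/* pretend the last step fails */
static void unregister_a(void) { puts("undo a"); }
static void unregister_b(void) { puts("undo b"); }

static int example_init(void)
{
	int ret;

	ret = register_a();
	if (ret < 0)
		return ret;

	ret = register_b();
	if (ret < 0)
		goto err_a;	/* only a has to be undone */

	ret = register_c();
	if (ret < 0)
		goto err_b;	/* undo b, then fall through and undo a */

	return 0;

err_b:
	unregister_b();
err_a:
	unregister_a();
	return ret;
}

int main(void)
{
	return example_init() ? 1 : 0;
}
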
 
index 060fcd42b6d560105a114c9923ce1cdcc177b696..2d1c626312cd8892d5eae0fa65e03d3347a09e81 100644 (file)
@@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
 
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
-       if (uart_circ_empty(xmit)) {
+       if (uart_circ_empty(xmit))
                sci_stop_tx(port);
-       } else {
-               ctrl = serial_port_in(port, SCSCR);
-
-               if (port->type != PORT_SCI) {
-                       serial_port_in(port, SCxSR); /* Dummy read */
-                       sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
-               }
 
-               ctrl |= SCSCR_TIE;
-               serial_port_out(port, SCSCR, ctrl);
-       }
 }
 
 /* On SH3, SCIF may read end-of-break as a space->mark char */
index 044c3cbdcfa40664497d13bd00e607584eff99c7..a9e12b3bc31d7e19966c724b8b31ce3ac64c5242 100644 (file)
@@ -325,7 +325,7 @@ static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
                if (tty && C_HUPCL(tty))
                        tty_port_lower_dtr_rts(port);
 
-               if (port->ops->shutdown)
+               if (port->ops && port->ops->shutdown)
                        port->ops->shutdown(port);
        }
 out:
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
  */
 int tty_port_carrier_raised(struct tty_port *port)
 {
-       if (port->ops->carrier_raised == NULL)
+       if (!port->ops || !port->ops->carrier_raised)
                return 1;
        return port->ops->carrier_raised(port);
 }
@@ -414,7 +414,7 @@ EXPORT_SYMBOL(tty_port_carrier_raised);
  */
 void tty_port_raise_dtr_rts(struct tty_port *port)
 {
-       if (port->ops->dtr_rts)
+       if (port->ops && port->ops->dtr_rts)
                port->ops->dtr_rts(port, 1);
 }
 EXPORT_SYMBOL(tty_port_raise_dtr_rts);
@@ -429,7 +429,7 @@ EXPORT_SYMBOL(tty_port_raise_dtr_rts);
  */
 void tty_port_lower_dtr_rts(struct tty_port *port)
 {
-       if (port->ops->dtr_rts)
+       if (port->ops && port->ops->dtr_rts)
                port->ops->dtr_rts(port, 0);
 }
 EXPORT_SYMBOL(tty_port_lower_dtr_rts);
@@ -684,7 +684,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
 
        if (!tty_port_initialized(port)) {
                clear_bit(TTY_IO_ERROR, &tty->flags);
-               if (port->ops->activate) {
+               if (port->ops && port->ops->activate) {
                        int retval = port->ops->activate(port, tty);
                        if (retval) {
                                mutex_unlock(&port->mutex);
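
These tty_port hunks guard each ->ops callback with a check that port->ops itself is set, because a tty_port can be used without an operations table. A small standalone illustration of the same defensive pattern (all names invented):

#include <stddef.h>
#include <stdio.h>

struct demo_port_ops {
	void (*shutdown)(void);
	int  (*carrier_raised)(void);
};

struct demo_port {
	const struct demo_port_ops *ops;	/* may legitimately be NULL */
};

static int demo_carrier_raised(struct demo_port *port)
{
	/* No ops table or no hook: assume carrier is present. */
	if (!port->ops || !port->ops->carrier_raised)
		return 1;
	return port->ops->carrier_raised();
}

static void demo_shutdown(struct demo_port *port)
{
	if (port->ops && port->ops->shutdown)
		port->ops->shutdown();
}

int main(void)
{
	struct demo_port port = { .ops = NULL };

	printf("carrier: %d\n", demo_carrier_raised(&port));	/* prints 1 */
	demo_shutdown(&port);					/* no crash */
	return 0;
}
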
index 739f8960811ac89d6f960a184155f4e0c602101a..ec666eb4b7b445d98cbc3ff59be63c1b7aa90437 100644 (file)
@@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
                clear_bit(EVENT_RX_STALL, &acm->flags);
        }
 
-       if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
+       if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
                tty_port_tty_wakeup(&acm->port);
-               clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
-       }
 }
 
 /*
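
The cdc-acm change merges the test_bit()/clear_bit() pair into test_and_clear_bit(), so checking and clearing the wakeup flag is one atomic step and a wakeup raised in between is not silently discarded. A kernel-style sketch of the difference, using a made-up flags word:

#include <linux/bitops.h>

#define EVENT_WAKEUP	0

static unsigned long demo_flags;

static void demo_handle_events(void)
{
	/*
	 * Racy form: another context can set the bit again after the test
	 * and before the clear, and that new event is thrown away:
	 *
	 *	if (test_bit(EVENT_WAKEUP, &demo_flags)) {
	 *		do_wakeup();
	 *		clear_bit(EVENT_WAKEUP, &demo_flags);
	 *	}
	 *
	 * Atomic form: the bit is read and cleared in one step, so a bit set
	 * after this point is still seen on the next pass.
	 */
	if (test_and_clear_bit(EVENT_WAKEUP, &demo_flags))
		; /* the wakeup work would go here */
}
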
index 48277bbc15e4d155fc9c1c7315fcf57d6347b51e..73c8e65917461f8f83d9233c96bdf0d2b8956b27 100644 (file)
@@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
 
        do {
                controller = of_find_node_with_property(controller, "phys");
+               if (!of_device_is_available(controller))
+                       continue;
                index = 0;
                do {
                        if (arg0 == -1) {
index 3189181bb628d921309d44296da06eff92b5d1bd..975d7c1288e36534bdd08be7e84642acdba535c7 100644 (file)
@@ -2741,6 +2741,9 @@ int usb_add_hcd(struct usb_hcd *hcd,
 
                retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
                                                  PHY_MODE_USB_HOST_SS);
+               if (retval)
+                       retval = usb_phy_roothub_set_mode(hcd->phy_roothub,
+                                                         PHY_MODE_USB_HOST);
                if (retval)
                        goto err_usb_phy_roothub_power_on;
 
index fdc6e4e403e81736db077e0c7cdf212aa6da2874..8cced3609e243b186caedd3eeb79e338c6151a73 100644 (file)
@@ -29,6 +29,7 @@
 #define PCI_DEVICE_ID_INTEL_BXT_M              0x1aaa
 #define PCI_DEVICE_ID_INTEL_APL                        0x5aaa
 #define PCI_DEVICE_ID_INTEL_KBP                        0xa2b0
+#define PCI_DEVICE_ID_INTEL_CMLH               0x02ee
 #define PCI_DEVICE_ID_INTEL_GLK                        0x31aa
 #define PCI_DEVICE_ID_INTEL_CNPLP              0x9dee
 #define PCI_DEVICE_ID_INTEL_CNPH               0xa36e
@@ -305,6 +306,9 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
          (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
 
+       { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
+         (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
          (kernel_ulong_t) &dwc3_pci_intel_properties, },
 
index 75b113a5b25cb6af28a8d7776e58678f6fcf7202..f3816a5c861eeeafdf1230afc1e7ca8fe41efa55 100644 (file)
@@ -391,20 +391,20 @@ try_again:
        req->complete = f_hidg_req_complete;
        req->context  = hidg;
 
+       spin_unlock_irqrestore(&hidg->write_spinlock, flags);
+
        status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
        if (status < 0) {
                ERROR(hidg->func.config->cdev,
                        "usb_ep_queue error on int endpoint %zd\n", status);
-               goto release_write_pending_unlocked;
+               goto release_write_pending;
        } else {
                status = count;
        }
-       spin_unlock_irqrestore(&hidg->write_spinlock, flags);
 
        return status;
 release_write_pending:
        spin_lock_irqsave(&hidg->write_spinlock, flags);
-release_write_pending_unlocked:
        hidg->write_pending = 0;
        spin_unlock_irqrestore(&hidg->write_spinlock, flags);
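
The f_hid hunk releases write_spinlock before calling usb_ep_queue() and only re-takes it on the error path, because calling into the controller with the lock held can recurse into the completion handler, which wants the same lock. A schematic of the lock hand-off with invented names (this is not the gadget API itself):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static int demo_pending;

/* Stand-in for a call that may invoke a completion handler synchronously. */
static int demo_submit(void) { return 0; }

static int demo_write(void)
{
	unsigned long flags;
	int status;

	spin_lock_irqsave(&demo_lock, flags);
	demo_pending = 1;		/* claim the request under the lock */
	spin_unlock_irqrestore(&demo_lock, flags);

	/* Submit without the lock: the completion side may take demo_lock. */
	status = demo_submit();
	if (status < 0)
		goto release_pending;

	return 0;

release_pending:
	spin_lock_irqsave(&demo_lock, flags);
	demo_pending = 0;
	spin_unlock_irqrestore(&demo_lock, flags);
	return status;
}
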
 
index b77f3126580ebb937986e7ced5b28739dd25dea4..c2011cd7df8cf5fbf0c5a5db153c37a1b0f451a6 100644 (file)
@@ -945,6 +945,7 @@ net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
                        break;
        }
        if (&req->req != _req) {
+               ep->stopped = stopped;
                spin_unlock_irqrestore(&ep->dev->lock, flags);
                return -EINVAL;
        }
index f63f82450bf4e4960414eb80b3020fda6bfa8c3f..898339e5df10d83d211942609a9bd695f199e787 100644 (file)
@@ -866,9 +866,6 @@ static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
        (void) readl(&ep->dev->pci->pcimstctl);
 
        writel(BIT(DMA_START), &dma->dmastat);
-
-       if (!ep->is_in)
-               stop_out_naking(ep);
 }
 
 static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
@@ -907,6 +904,7 @@ static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
                        writel(BIT(DMA_START), &dma->dmastat);
                        return;
                }
+               stop_out_naking(ep);
        }
 
        tmp = dmactl_default;
@@ -1275,9 +1273,9 @@ static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
                        break;
        }
        if (&req->req != _req) {
+               ep->stopped = stopped;
                spin_unlock_irqrestore(&ep->dev->lock, flags);
-               dev_err(&ep->dev->pdev->dev, "%s: Request mismatch\n",
-                                                               __func__);
+               ep_dbg(ep->dev, "%s: Request mismatch\n", __func__);
                return -EINVAL;
        }
 
index 934584f0a20a7bee30adcee141c0a7eb63a51b3f..6343fbacd2442adea634a9911bed82cf1603c417 100644 (file)
@@ -3204,6 +3204,9 @@ static int __init u132_hcd_init(void)
        printk(KERN_INFO "driver %s\n", hcd_name);
        workqueue = create_singlethread_workqueue("u132");
        retval = platform_driver_register(&u132_platform_driver);
+       if (retval)
+               destroy_workqueue(workqueue);
+
        return retval;
 }
 
index c78be578abb065af0e0715352f8c502345907ad7..d932cc31711e8a0a872efaf35d2ab347fa45106d 100644 (file)
@@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
                return -1;
 
        writel(0, &dbc->regs->control);
-       xhci_dbc_mem_cleanup(xhci);
        dbc->state = DS_DISABLED;
 
        return 0;
@@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
        ret = xhci_do_dbc_stop(xhci);
        spin_unlock_irqrestore(&dbc->lock, flags);
 
-       if (!ret)
+       if (!ret) {
+               xhci_dbc_mem_cleanup(xhci);
                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+       }
 }
 
 static void
index e2eece6936556b06be37e43a29a3c8554c722203..96a740543183729bb702244151ebb95d88acd97f 100644 (file)
@@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
        port_index = max_ports;
        while (port_index--) {
                u32 t1, t2;
-
+               int retries = 10;
+retry:
                t1 = readl(ports[port_index]->addr);
                t2 = xhci_port_state_to_neutral(t1);
                portsc_buf[port_index] = 0;
 
-               /* Bail out if a USB3 port has a new device in link training */
-               if ((hcd->speed >= HCD_USB3) &&
+               /*
+                * Give a USB3 port in link training time to finish, but don't
+                * prevent suspend as the port might be stuck
+                */
+               if ((hcd->speed >= HCD_USB3) && retries-- &&
                    (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
-                       bus_state->bus_suspended = 0;
                        spin_unlock_irqrestore(&xhci->lock, flags);
-                       xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
-                       return -EBUSY;
+                       msleep(XHCI_PORT_POLLING_LFPS_TIME);
+                       spin_lock_irqsave(&xhci->lock, flags);
+                       xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
+                                port_index);
+                       goto retry;
                }
-
                /* suspend ports in U0, or bail out for new connect changes */
                if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
                        if ((t1 & PORT_CSC) && wake_enabled) {
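
Rather than aborting bus suspend whenever a USB3 port is still in link training, the loop above retries up to 10 times with a 36 ms sleep per attempt, staying within the 360 ms tPollingLFPSTimeout budget (10 x 36 ms). A stripped-down userspace sketch of the bounded retry, with a fake port-state reader standing in for the PORTSC poll:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define POLL_SLEEP_MS	36	/* per-attempt wait */
#define POLL_RETRIES	10	/* 10 * 36 ms ~= 360 ms total budget */

/* Fake port-state reader: pretend training finishes on the third poll. */
static bool port_still_polling(void)
{
	static int calls;
	return ++calls < 3;
}

int main(void)
{
	int retries = POLL_RETRIES;

	while (port_still_polling() && retries--) {
		printf("port still training, waiting %d ms\n", POLL_SLEEP_MS);
		usleep(POLL_SLEEP_MS * 1000);
	}

	/* Whether or not training finished, suspend proceeds afterwards. */
	puts("continuing with suspend");
	return 0;
}
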
index a6e4637157799769cc0f77b6b2f815c7c4ad6490..671bce18782c5a788ad1af896ab9066fd4078839 100644 (file)
@@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
        if (!xhci_rcar_wait_for_pll_active(hcd))
                return -ETIMEDOUT;
 
+       xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        return xhci_rcar_download_firmware(hcd);
 }
 
index 40fa25c4d0419851bac800bdd74d29bb6f7e0fee..9215a28dad406a724959f0315a0444525d0edb90 100644 (file)
@@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
                }
        }
 
-       if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
-                       DEV_SUPERSPEED_ANY(portsc)) {
+       if ((portsc & PORT_PLC) &&
+           DEV_SUPERSPEED_ANY(portsc) &&
+           ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
+            (portsc & PORT_PLS_MASK) == XDEV_U1 ||
+            (portsc & PORT_PLS_MASK) == XDEV_U2)) {
                xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
-               /* We've just brought the device into U0 through either the
+               /* We've just brought the device into U0/1/2 through either the
                 * Resume state after a device remote wakeup, or through the
                 * U3Exit state after a host-initiated resume.  If it's a device
                 * initiated remote wake, don't pass up the link state change,
index 652dc36e30129c9f15a703b640a52c8f2685a82c..9334cdee382a67a8b783b13b9c88a2d1dd4c3da3 100644 (file)
@@ -452,6 +452,14 @@ struct xhci_op_regs {
  */
 #define XHCI_DEFAULT_BESL      4
 
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster,
+ * so check status 10 times with a 36ms sleep in places where we need to
+ * wait for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME  36
+
 /**
  * struct xhci_intr_reg - Interrupt Register Set
  * @irq_pending:       IMAN - Interrupt Management Register.  Used to enable
index 4d72b7d1d383be2643d09756f11f709a8b1906a5..04684849d68320862a4de40d5de5e5db4487cd7e 100644 (file)
@@ -547,7 +547,7 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
         */
        hub->port_swap = USB251XB_DEF_PORT_SWAP;
        of_property_for_each_u32(np, "swap-dx-lanes", prop, p, port) {
-               if ((port >= 0) && (port <= data->port_cnt))
+               if (port <= data->port_cnt)
                        hub->port_swap |= BIT(port);
        }
 
@@ -612,7 +612,7 @@ static int usb251xb_probe(struct usb251xb *hub)
                                                           dev);
        int err;
 
-       if (np) {
+       if (np && of_id) {
                err = usb251xb_get_ofdata(hub,
                                          (struct usb251xb_data *)of_id->data);
                if (err) {
index bcc23486c4ed2813da698e14faf0e11366577afb..928c2cd6fc0084ef0feb7f79edf6d577e8fa5a46 100644 (file)
@@ -6,6 +6,7 @@ config USB_MTU3
        tristate "MediaTek USB3 Dual Role controller"
        depends on USB || USB_GADGET
        depends on ARCH_MEDIATEK || COMPILE_TEST
+       depends on EXTCON || !EXTCON
        select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
        help
          Say Y or M here if your system runs on MediaTek SoCs with
index fffe23ab0189a00b1a7747662c9248cfd41770ae..979bef9bfb6bc7189e2c16d8dc00ae4ae82d4854 100644 (file)
@@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
        { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
        { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
+       { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
        { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
        { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
        { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
index 8f5b1747175945f8830a909803acdc10b48f7de4..1d8461ae2c340324f64c7796a5be644037f84ddb 100644 (file)
@@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
+       { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
index b863bedb55a138b7a99abe0a82aab39e10b79e3f..5755f0df002589403366a75acec50a40fc86b955 100644 (file)
 /*
  * NovaTech product ids (FTDI_VID)
  */
-#define FTDI_NT_ORIONLXM_PID   0x7c90  /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLXM_PID           0x7c90  /* OrionLXm Substation Automation Platform */
+#define FTDI_NT_ORIONLX_PLUS_PID       0x7c91  /* OrionLX+ Substation Automation Platform */
+#define FTDI_NT_ORION_IO_PID           0x7c92  /* Orion I/O */
 
 /*
  * Synapse Wireless product ids (FTDI_VID)
index fc52ac75fbf66f0f2e7ac2f8951b8df6ea2ba49f..18110225d50606abaefe2e0c90490ffff0888f41 100644 (file)
@@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
        if (!urbtrack)
                return -ENOMEM;
 
-       kref_get(&mos_parport->ref_count);
-       urbtrack->mos_parport = mos_parport;
        urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
        if (!urbtrack->urb) {
                kfree(urbtrack);
@@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
                             usb_sndctrlpipe(usbdev, 0),
                             (unsigned char *)urbtrack->setup,
                             NULL, 0, async_complete, urbtrack);
+       kref_get(&mos_parport->ref_count);
+       urbtrack->mos_parport = mos_parport;
        kref_init(&urbtrack->ref_count);
        INIT_LIST_HEAD(&urbtrack->urblist_entry);
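
The mos7720 fix moves kref_get() on the parport below the allocation error paths, so a failed URB or setup-buffer allocation no longer leaves behind a reference that is never put. A small runnable sketch of taking the reference only once failure is no longer possible (plain counters stand in for struct kref):

#include <stdio.h>
#include <stdlib.h>

struct parent { int refcount; };

static void parent_get(struct parent *p) { p->refcount++; }

struct tracker { struct parent *parent; void *buf; };

/* Take the parent reference only after every allocation has succeeded. */
static struct tracker *tracker_create(struct parent *p, int fail_alloc)
{
	struct tracker *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;

	t->buf = fail_alloc ? NULL : malloc(16);
	if (!t->buf) {
		free(t);	/* no reference was taken, nothing to drop */
		return NULL;
	}

	parent_get(p);		/* safe: no error path can follow any more */
	t->parent = p;
	return t;
}

int main(void)
{
	struct parent p = { .refcount = 1 };

	if (!tracker_create(&p, 1))
		printf("alloc failed, refcount still %d\n", p.refcount);	/* 1 */
	return 0;
}
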
 
index 11b21d9410f35306d299339d7a29554aa9cf47e3..83869065b8022ba68b145db6756cc6f2f9e3b941 100644 (file)
@@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EC25                   0x0125
 #define QUECTEL_PRODUCT_BG96                   0x0296
 #define QUECTEL_PRODUCT_EP06                   0x0306
+#define QUECTEL_PRODUCT_EM12                   0x0512
 
 #define CMOTECH_VENDOR_ID                      0x16d8
 #define CMOTECH_PRODUCT_6001                   0x6001
@@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(3) },
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
-       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+       { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
+         .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
        /* Quectel products using Qualcomm vendor ID */
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
        { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
@@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
          .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
        { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
+         .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
+       { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
        { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
@@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
          .driver_info = RSVD(4) },
        { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff),                     /* D-Link DWM-222 */
          .driver_info = RSVD(4) },
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
-       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
-       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                /* OLICARD300 - MT6225 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) },    /* D-Link DWM-152/C1 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/C1 */
+       { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) },    /* D-Link DWM-156/A3 */
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff),                     /* Olicard 600 */
+         .driver_info = RSVD(4) },
+       { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) },                   /* OLICARD300 - MT6225 */
        { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
        { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
        { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
index 0f62db091d8dab59416fb70af97a44a127503d05..a2233d72ae7c9a919d86f4e0bc39ea26d6cc6aba 100644 (file)
@@ -37,6 +37,7 @@
        S(SRC_ATTACHED),                        \
        S(SRC_STARTUP),                         \
        S(SRC_SEND_CAPABILITIES),               \
+       S(SRC_SEND_CAPABILITIES_TIMEOUT),       \
        S(SRC_NEGOTIATE_CAPABILITIES),          \
        S(SRC_TRANSITION_SUPPLY),               \
        S(SRC_READY),                           \
@@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
                        /* port->hard_reset_count = 0; */
                        port->caps_count = 0;
                        port->pd_capable = true;
-                       tcpm_set_state_cond(port, hard_reset_state(port),
+                       tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
                                            PD_T_SEND_SOURCE_CAP);
                }
                break;
+       case SRC_SEND_CAPABILITIES_TIMEOUT:
+               /*
+                * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
+                *
+                * PD 2.0 sinks are supposed to accept src-capabilities with a
+                * 3.0 header and simply ignore any src PDOs which the sink does
+                * not understand, such as PPS, but some 2.0 sinks instead ignore
+                * the entire PD_DATA_SOURCE_CAP message, causing contract
+                * negotiation to fail.
+                *
+                * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
+                * sending src-capabilities with a lower PD revision to
+                * make these broken sinks work.
+                */
+               if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
+                       tcpm_set_state(port, HARD_RESET_SEND, 0);
+               } else if (port->negotiated_rev > PD_REV20) {
+                       port->negotiated_rev--;
+                       port->hard_reset_count = 0;
+                       tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+               } else {
+                       tcpm_set_state(port, hard_reset_state(port), 0);
+               }
+               break;
        case SRC_NEGOTIATE_CAPABILITIES:
                ret = tcpm_pd_check_request(port);
                if (ret < 0) {
index 423208e19383c0c2cd414d3b627b8f4c48b6f67b..6770afd4076548eeb0021eef062160013b97b1ea 100644 (file)
@@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
        wcove->dev = &pdev->dev;
        wcove->regmap = pmic->regmap;
 
-       irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
-                                 platform_get_irq(pdev, 0));
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+               return irq;
+       }
+
+       irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
        if (irq < 0)
                return irq;
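
The wcove change fetches the platform IRQ and validates it before handing it to regmap_irq_get_virq(), instead of passing a possible error code straight through. A minimal probe-style sketch of the ordering; the second parameter exists only to keep the sketch self-contained and is not how the real driver obtains its IRQ chip data:

#include <linux/platform_device.h>
#include <linux/regmap.h>

static int example_probe(struct platform_device *pdev,
			 struct regmap_irq_chip_data *irq_chip_data)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		return irq;	/* don't feed an error code into the lookup below */
	}

	irq = regmap_irq_get_virq(irq_chip_data, irq);
	if (irq < 0)
		return irq;

	return 0;
}
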
 
index df7d09409efe3a9512495b6c718ba0bbbebb39b3..8ca333f21292ee7dcb611591aed0e6f03421341b 100644 (file)
 
 #define GUEST_MAPPINGS_TRIES   5
 
+#define VBG_KERNEL_REQUEST \
+       (VMMDEV_REQUESTOR_KERNEL | VMMDEV_REQUESTOR_USR_DRV | \
+        VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN)
+
 /**
  * Reserves memory in which the VMM can relocate any guest mappings
  * that are floating around.
@@ -48,7 +52,8 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
        int i, rc;
 
        /* Query the required space. */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -135,7 +140,8 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
         * Tell the host that we're going to free the memory we reserved for
         * it, then free it up. (Leak the memory if anything goes wrong here.)
         */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return;
 
@@ -172,8 +178,10 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        struct vmmdev_guest_info2 *req2 = NULL;
        int rc, ret = -ENOMEM;
 
-       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
-       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
+       req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO,
+                            VBG_KERNEL_REQUEST);
+       req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2,
+                            VBG_KERNEL_REQUEST);
        if (!req1 || !req2)
                goto out_free;
 
@@ -187,8 +195,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
        req2->additions_minor = VBG_VERSION_MINOR;
        req2->additions_build = VBG_VERSION_BUILD;
        req2->additions_revision = VBG_SVN_REV;
-       /* (no features defined yet) */
-       req2->additions_features = 0;
+       req2->additions_features =
+               VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO;
        strlcpy(req2->name, VBG_VERSION_STRING,
                sizeof(req2->name));
 
@@ -230,7 +238,8 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
        struct vmmdev_guest_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -423,7 +432,8 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
        struct vmmdev_heartbeat *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -457,7 +467,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
 
        gdev->guest_heartbeat_req = vbg_req_alloc(
                                        sizeof(*gdev->guest_heartbeat_req),
-                                       VMMDEVREQ_GUEST_HEARTBEAT);
+                                       VMMDEVREQ_GUEST_HEARTBEAT,
+                                       VBG_KERNEL_REQUEST);
        if (!gdev->guest_heartbeat_req)
                return -ENOMEM;
 
@@ -528,7 +539,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -567,8 +579,14 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
+       /*
+        * Allocate a request buffer before taking the spinlock. When the
+        * session is being terminated, the requestor is the kernel, as
+        * we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -627,7 +645,8 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
        struct vmmdev_mask *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -662,8 +681,14 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
        u32 changed, previous;
        int rc, ret = 0;
 
-       /* Allocate a request buffer before taking the spinlock */
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
+       /*
+        * Allocate a request buffer before taking the spinlock. When the
+        * session is being terminated, the requestor is the kernel, as
+        * we're cleaning up.
+        */
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
+                           session_termination ? VBG_KERNEL_REQUEST :
+                                                 session->requestor);
        if (!req) {
                if (!session_termination)
                        return -ENOMEM;
@@ -722,7 +747,8 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
        struct vmmdev_host_version *req;
        int rc, ret;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
@@ -783,19 +809,24 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
 
        gdev->mem_balloon.get_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
-                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
+                             VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ,
+                             VBG_KERNEL_REQUEST);
        gdev->mem_balloon.change_req =
                vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
-                             VMMDEVREQ_CHANGE_MEMBALLOON);
+                             VMMDEVREQ_CHANGE_MEMBALLOON,
+                             VBG_KERNEL_REQUEST);
        gdev->cancel_req =
                vbg_req_alloc(sizeof(*(gdev->cancel_req)),
-                             VMMDEVREQ_HGCM_CANCEL2);
+                             VMMDEVREQ_HGCM_CANCEL2,
+                             VBG_KERNEL_REQUEST);
        gdev->ack_events_req =
                vbg_req_alloc(sizeof(*gdev->ack_events_req),
-                             VMMDEVREQ_ACKNOWLEDGE_EVENTS);
+                             VMMDEVREQ_ACKNOWLEDGE_EVENTS,
+                             VBG_KERNEL_REQUEST);
        gdev->mouse_status_req =
                vbg_req_alloc(sizeof(*gdev->mouse_status_req),
-                             VMMDEVREQ_GET_MOUSE_STATUS);
+                             VMMDEVREQ_GET_MOUSE_STATUS,
+                             VBG_KERNEL_REQUEST);
 
        if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
            !gdev->cancel_req || !gdev->ack_events_req ||
@@ -892,9 +923,9 @@ void vbg_core_exit(struct vbg_dev *gdev)
  * vboxguest_linux.c calls this when userspace opens the char-device.
  * Return: A pointer to the new session or an ERR_PTR on error.
  * @gdev:              The Guest extension device.
- * @user:              Set if this is a session for the vboxuser device.
+ * @requestor:         VMMDEV_REQUESTOR_* flags
  */
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor)
 {
        struct vbg_session *session;
 
@@ -903,7 +934,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
                return ERR_PTR(-ENOMEM);
 
        session->gdev = gdev;
-       session->user_session = user;
+       session->requestor = requestor;
 
        return session;
 }
@@ -924,7 +955,9 @@ void vbg_core_close_session(struct vbg_session *session)
                if (!session->hgcm_client_ids[i])
                        continue;
 
-               vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
+               /* requestor is kernel here, as we're cleaning up. */
+               vbg_hgcm_disconnect(gdev, VBG_KERNEL_REQUEST,
+                                   session->hgcm_client_ids[i], &rc);
        }
 
        kfree(session);
@@ -1152,7 +1185,8 @@ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
                return -EPERM;
        }
 
-       if (trusted_apps_only && session->user_session) {
+       if (trusted_apps_only &&
+           (session->requestor & VMMDEV_REQUESTOR_USER_DEVICE)) {
                vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
                        req->request_type);
                return -EPERM;
@@ -1209,8 +1243,8 @@ static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EMFILE;
 
-       ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
-                              &conn->hdr.rc);
+       ret = vbg_hgcm_connect(gdev, session->requestor, &conn->u.in.loc,
+                              &client_id, &conn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && conn->hdr.rc >= 0) {
@@ -1251,7 +1285,8 @@ static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
        if (i >= ARRAY_SIZE(session->hgcm_client_ids))
                return -EINVAL;
 
-       ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
+       ret = vbg_hgcm_disconnect(gdev, session->requestor, client_id,
+                                 &disconn->hdr.rc);
 
        mutex_lock(&gdev->session_mutex);
        if (ret == 0 && disconn->hdr.rc >= 0)
@@ -1313,12 +1348,12 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
        }
 
        if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
-               ret = vbg_hgcm_call32(gdev, client_id,
+               ret = vbg_hgcm_call32(gdev, session->requestor, client_id,
                                      call->function, call->timeout_ms,
                                      VBG_IOCTL_HGCM_CALL_PARMS32(call),
                                      call->parm_count, &call->hdr.rc);
        else
-               ret = vbg_hgcm_call(gdev, client_id,
+               ret = vbg_hgcm_call(gdev, session->requestor, client_id,
                                    call->function, call->timeout_ms,
                                    VBG_IOCTL_HGCM_CALL_PARMS(call),
                                    call->parm_count, &call->hdr.rc);
@@ -1408,6 +1443,7 @@ static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
 }
 
 static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
+                                    struct vbg_session *session,
                                     struct vbg_ioctl_write_coredump *dump)
 {
        struct vmmdev_write_core_dump *req;
@@ -1415,7 +1451,8 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
        if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
                return -EINVAL;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
+                           session->requestor);
        if (!req)
                return -ENOMEM;
 
@@ -1476,7 +1513,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
        case VBG_IOCTL_CHECK_BALLOON:
                return vbg_ioctl_check_balloon(gdev, data);
        case VBG_IOCTL_WRITE_CORE_DUMP:
-               return vbg_ioctl_write_core_dump(gdev, data);
+               return vbg_ioctl_write_core_dump(gdev, session, data);
        }
 
        /* Variable sized requests. */
@@ -1508,7 +1545,8 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
        struct vmmdev_mouse_status *req;
        int rc;
 
-       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
+       req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
+                           VBG_KERNEL_REQUEST);
        if (!req)
                return -ENOMEM;
 
index 7ad9ec45bfa9d649627f45e9410aebff43cd22c7..4188c12b839f7e74f845cc9524c1917b188dae95 100644 (file)
@@ -154,15 +154,15 @@ struct vbg_session {
         * host. Protected by vbg_gdev.session_mutex.
         */
        u32 guest_caps;
-       /** Does this session belong to a root process or a user one? */
-       bool user_session;
+       /** VMMDEV_REQUESTOR_* flags */
+       u32 requestor;
        /** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_devevent_spinlock. */
        bool cancel_waiters;
 };
 
 int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
 void vbg_core_exit(struct vbg_dev *gdev);
-struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
+struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor);
 void vbg_core_close_session(struct vbg_session *session);
 int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
 int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
@@ -172,12 +172,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
 void vbg_linux_mouse_event(struct vbg_dev *gdev);
 
 /* Private (non exported) functions from vboxguest_utils.c */
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor);
 void vbg_req_free(void *req, size_t len);
 int vbg_req_perform(struct vbg_dev *gdev, void *req);
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status);
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status);
 
 #endif
index 6e2a9619192d2317f8f449fbb5f9c24d0699e3f8..6e8c0f1c1056296e983fd70af5de7c405392c3ee 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright (C) 2006-2016 Oracle Corporation
  */
 
+#include <linux/cred.h>
 #include <linux/input.h>
 #include <linux/kernel.h>
 #include <linux/miscdevice.h>
@@ -28,6 +29,23 @@ static DEFINE_MUTEX(vbg_gdev_mutex);
 /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
 static struct vbg_dev *vbg_gdev;
 
+static u32 vbg_misc_device_requestor(struct inode *inode)
+{
+       u32 requestor = VMMDEV_REQUESTOR_USERMODE |
+                       VMMDEV_REQUESTOR_CON_DONT_KNOW |
+                       VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
+
+       if (from_kuid(current_user_ns(), current->cred->uid) == 0)
+               requestor |= VMMDEV_REQUESTOR_USR_ROOT;
+       else
+               requestor |= VMMDEV_REQUESTOR_USR_USER;
+
+       if (in_egroup_p(inode->i_gid))
+               requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
+
+       return requestor;
+}
+
 static int vbg_misc_device_open(struct inode *inode, struct file *filp)
 {
        struct vbg_session *session;
@@ -36,7 +54,7 @@ static int vbg_misc_device_open(struct inode *inode, struct file *filp)
        /* misc_open sets filp->private_data to our misc device */
        gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -53,7 +71,8 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
        gdev = container_of(filp->private_data, struct vbg_dev,
                            misc_device_user);
 
-       session = vbg_core_open_session(gdev, false);
+       session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
+                                             VMMDEV_REQUESTOR_USER_DEVICE);
        if (IS_ERR(session))
                return PTR_ERR(session);
 
@@ -115,7 +134,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
                         req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
 
        if (is_vmmdev_req)
-               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
+               buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
+                                   session->requestor);
        else
                buf = kmalloc(size, GFP_KERNEL);
        if (!buf)
index bf4474214b4d31bb708c3d9c302d6ce415e17c7a..75fd140b02ff8aa41816a1f0284a4926c214df7e 100644 (file)
@@ -62,7 +62,8 @@ VBG_LOG(vbg_err, pr_err);
 VBG_LOG(vbg_debug, pr_debug);
 #endif
 
-void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
+void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type,
+                   u32 requestor)
 {
        struct vmmdev_request_header *req;
        int order = get_order(PAGE_ALIGN(len));
@@ -78,7 +79,7 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
        req->request_type = req_type;
        req->rc = VERR_GENERAL_FAILURE;
        req->reserved1 = 0;
-       req->reserved2 = 0;
+       req->requestor = requestor;
 
        return req;
 }
@@ -119,7 +120,7 @@ static bool hgcm_req_done(struct vbg_dev *gdev,
        return done;
 }
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status)
 {
@@ -127,7 +128,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
        int rc;
 
        hgcm_connect = vbg_req_alloc(sizeof(*hgcm_connect),
-                                    VMMDEVREQ_HGCM_CONNECT);
+                                    VMMDEVREQ_HGCM_CONNECT, requestor);
        if (!hgcm_connect)
                return -ENOMEM;
 
@@ -153,13 +154,15 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
 }
 EXPORT_SYMBOL(vbg_hgcm_connect);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status)
 {
        struct vmmdev_hgcm_disconnect *hgcm_disconnect = NULL;
        int rc;
 
        hgcm_disconnect = vbg_req_alloc(sizeof(*hgcm_disconnect),
-                                       VMMDEVREQ_HGCM_DISCONNECT);
+                                       VMMDEVREQ_HGCM_DISCONNECT,
+                                       requestor);
        if (!hgcm_disconnect)
                return -ENOMEM;
 
@@ -593,9 +596,10 @@ static int hgcm_call_copy_back_result(
        return 0;
 }
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status)
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status)
 {
        struct vmmdev_hgcm_call *call;
        void **bounce_bufs = NULL;
@@ -615,7 +619,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
                goto free_bounce_bufs;
        }
 
-       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL);
+       call = vbg_req_alloc(size, VMMDEVREQ_HGCM_CALL, requestor);
        if (!call) {
                ret = -ENOMEM;
                goto free_bounce_bufs;
@@ -647,9 +651,9 @@ EXPORT_SYMBOL(vbg_hgcm_call);
 
 #ifdef CONFIG_COMPAT
 int vbg_hgcm_call32(
-       struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
-       struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
-       int *vbox_status)
+       struct vbg_dev *gdev, u32 requestor, u32 client_id, u32 function,
+       u32 timeout_ms, struct vmmdev_hgcm_function_parameter32 *parm32,
+       u32 parm_count, int *vbox_status)
 {
        struct vmmdev_hgcm_function_parameter *parm64 = NULL;
        u32 i, size;
@@ -689,7 +693,7 @@ int vbg_hgcm_call32(
                        goto out_free;
        }
 
-       ret = vbg_hgcm_call(gdev, client_id, function, timeout_ms,
+       ret = vbg_hgcm_call(gdev, requestor, client_id, function, timeout_ms,
                            parm64, parm_count, vbox_status);
        if (ret < 0)
                goto out_free;
index 77f0c8f8a23112f1d3c16b237514237767bab398..84834dad38d5c431d161607989080e758bb62b7b 100644 (file)
@@ -9,11 +9,10 @@
 #ifndef __VBOX_VERSION_H__
 #define __VBOX_VERSION_H__
 
-/* Last synced October 4th 2017 */
-#define VBG_VERSION_MAJOR 5
-#define VBG_VERSION_MINOR 2
+#define VBG_VERSION_MAJOR 6
+#define VBG_VERSION_MINOR 0
 #define VBG_VERSION_BUILD 0
-#define VBG_SVN_REV 68940
-#define VBG_VERSION_STRING "5.2.0"
+#define VBG_SVN_REV 127566
+#define VBG_VERSION_STRING "6.0.0"
 
 #endif
index 5e2ae978935de3630cfda2184534bb838620ea6f..6337b8d75d960bdefc5c8119a187d185ff918dfb 100644 (file)
@@ -98,8 +98,8 @@ struct vmmdev_request_header {
        s32 rc;
        /** Reserved field no.1. MBZ. */
        u32 reserved1;
-       /** Reserved field no.2. MBZ. */
-       u32 reserved2;
+       /** IN: Requestor information (VMMDEV_REQUESTOR_*) */
+       u32 requestor;
 };
 VMMDEV_ASSERT_SIZE(vmmdev_request_header, 24);
 
@@ -247,6 +247,8 @@ struct vmmdev_guest_info {
 };
 VMMDEV_ASSERT_SIZE(vmmdev_guest_info, 24 + 8);
 
+#define VMMDEV_GUEST_INFO2_ADDITIONS_FEATURES_REQUESTOR_INFO   BIT(0)
+
 /** struct vmmdev_guestinfo2 - Guest information report, version 2. */
 struct vmmdev_guest_info2 {
        /** Header. */
@@ -259,7 +261,7 @@ struct vmmdev_guest_info2 {
        u32 additions_build;
        /** SVN revision. */
        u32 additions_revision;
-       /** Feature mask, currently unused. */
+       /** Feature mask. */
        u32 additions_features;
        /**
         * The intentional meaning of this field was:
index ca08c83168f5fbf1f7f6b52c8c3ff769bf70cf04..0b37867b5c202332b66ba5bede2a31e4287a23e0 100644 (file)
@@ -1515,8 +1515,8 @@ static int afs_fs_setattr_size64(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
-       *bp++ = 0;
+       *bp++ = htonl(attr->ia_size >> 32);     /* position of start of write */
+       *bp++ = htonl((u32) attr->ia_size);
        *bp++ = 0;                              /* size of write */
        *bp++ = 0;
        *bp++ = htonl(attr->ia_size >> 32);     /* new file length */
@@ -1564,7 +1564,7 @@ static int afs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
 
        xdr_encode_AFS_StoreStatus(&bp, attr);
 
-       *bp++ = 0;                              /* position of start of write */
+       *bp++ = htonl(attr->ia_size);           /* position of start of write */
        *bp++ = 0;                              /* size of write */
        *bp++ = htonl(attr->ia_size);           /* new file length */
 
index 5aa57929e8c23559c41b8a875f3ea2db43a364dc..6e97a42d24d130471a97a28510ec3712605c50cd 100644 (file)
@@ -1514,7 +1514,7 @@ static int yfs_fs_setattr_size(struct afs_fs_cursor *fc, struct iattr *attr)
        bp = xdr_encode_u32(bp, 0); /* RPC flags */
        bp = xdr_encode_YFSFid(bp, &vnode->fid);
        bp = xdr_encode_YFS_StoreStatus(bp, attr);
-       bp = xdr_encode_u64(bp, 0);             /* position of start of write */
+       bp = xdr_encode_u64(bp, attr->ia_size); /* position of start of write */
        bp = xdr_encode_u64(bp, 0);             /* size of write */
        bp = xdr_encode_u64(bp, attr->ia_size); /* new file length */
        yfs_check_req(call, bp);
index 1d49694e6ae3226044c8d9464cca09f78889d335..c5880329ae37c661b4e87b3cafc0599e776f242d 100644 (file)
@@ -6174,7 +6174,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
         *
         * This is overestimating in most cases.
         */
-       qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
+       qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
 
        spin_lock(&block_rsv->lock);
        block_rsv->size = reserve_size;
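
The one-character cast above makes the multiplication happen in 64 bits; without it, the product of two 32-bit values wraps at 2^32 before it is assigned to the 64-bit reservation size. A standalone demonstration with values chosen only to force the wrap:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t outstanding_extents = 300000;	/* illustrative count */
	uint32_t nodesize = 16384;		/* common btrfs nodesize */

	/* 32-bit multiply wraps at 2^32 before the widening assignment. */
	uint64_t wrapped = outstanding_extents * nodesize;

	/* Casting one operand first makes the multiply happen in 64 bits. */
	uint64_t correct = (uint64_t)outstanding_extents * nodesize;

	printf("wrapped: %" PRIu64 "\n", wrapped);	/* 620232704 */
	printf("correct: %" PRIu64 "\n", correct);	/* 4915200000 */
	return 0;
}
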
index eb680b715dd6b2f50e7d98026a8dcc256797eacc..e659d9d6110733845b35309703d9dff73cb78689 100644 (file)
@@ -1922,8 +1922,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
        int i;
 
        /* Level sanity check */
-       if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
-           root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
+       if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
+           root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
            root_level < cur_level) {
                btrfs_err_rl(fs_info,
                        "%s: bad levels, cur_level=%d root_level=%d",
index 1869ba8e5981c948c435bf26e3c2f9d8016770b0..67a6f7d4740230aaa24b1ecad5e91ec5b94b5f70 100644 (file)
@@ -2430,8 +2430,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
                        bitmap_clear(rbio->dbitmap, pagenr, 1);
                kunmap(p);
 
-               for (stripe = 0; stripe < rbio->real_stripes; stripe++)
+               for (stripe = 0; stripe < nr_data; stripe++)
                        kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+               kunmap(p_page);
        }
 
        __free_page(p_page);
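
The raid56 hunk makes the kunmap() calls mirror the kmap() calls: only the nr_data stripes that were mapped are unmapped, and the separately mapped parity page gets its own kunmap(). A schematic of keeping the pairs symmetric (the fixed-size array and the loop bound are assumptions of this sketch, not the rbio layout):

#include <linux/highmem.h>
#include <linux/mm.h>

/* Assumes at most 16 data pages; every kmap() below is paired with exactly
 * one kunmap() of the same page, using the same loop bound.
 */
static void demo_xor_parity(struct page **data, int nr_data,
			    struct page *parity)
{
	void *pointers[16];
	void *parity_addr;
	int i;

	for (i = 0; i < nr_data; i++)
		pointers[i] = kmap(data[i]);
	parity_addr = kmap(parity);

	/* ... XOR pointers[0..nr_data-1] into parity_addr here ... */

	for (i = 0; i < nr_data; i++)	/* same bound as the kmap loop */
		kunmap(data[i]);
	kunmap(parity);			/* the parity page needs its own kunmap */
}
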
index acdad6d658f54bda7cf9c379867d212a41d1c24b..e4e665f422fc4c87b05181211b73ca2097e3b7c2 100644 (file)
@@ -1886,8 +1886,10 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
        }
 }
 
-static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline int btrfs_start_delalloc_flush(struct btrfs_trans_handle *trans)
 {
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+
        /*
         * We use writeback_inodes_sb here because if we used
         * btrfs_start_delalloc_roots we would deadlock with fs freeze.
@@ -1897,15 +1899,50 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
         * from already being in a transaction and our join_transaction doesn't
         * have to re-take the fs freeze lock.
         */
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
                writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+       } else {
+               struct btrfs_pending_snapshot *pending;
+               struct list_head *head = &trans->transaction->pending_snapshots;
+
+               /*
+                * Flush delalloc for any root that is going to be snapshotted.
+                * This is done to avoid a corrupted version of files, in the
+                * snapshots, that had both buffered and direct IO writes (even
+                * if they were done sequentially) due to an unordered update of
+                * the inode's size on disk.
+                */
+               list_for_each_entry(pending, head, list) {
+                       int ret;
+
+                       ret = btrfs_start_delalloc_snapshot(pending->root);
+                       if (ret)
+                               return ret;
+               }
+       }
        return 0;
 }
 
-static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
+static inline void btrfs_wait_delalloc_flush(struct btrfs_trans_handle *trans)
 {
-       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
+       struct btrfs_fs_info *fs_info = trans->fs_info;
+
+       if (btrfs_test_opt(fs_info, FLUSHONCOMMIT)) {
                btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
+       } else {
+               struct btrfs_pending_snapshot *pending;
+               struct list_head *head = &trans->transaction->pending_snapshots;
+
+               /*
+                * Wait for any delalloc that we started previously for the roots
+                * that are going to be snapshotted. This is to avoid a corrupted
+                * version of files in the snapshots that had both buffered and
+                * direct IO writes (even if they were done sequentially).
+                */
+               list_for_each_entry(pending, head, list)
+                       btrfs_wait_ordered_extents(pending->root,
+                                                  U64_MAX, 0, U64_MAX);
+       }
 }
 
 int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
@@ -2023,7 +2060,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
 
        extwriter_counter_dec(cur_trans, trans->type);
 
-       ret = btrfs_start_delalloc_flush(fs_info);
+       ret = btrfs_start_delalloc_flush(trans);
        if (ret)
                goto cleanup_transaction;
 
@@ -2039,7 +2076,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        if (ret)
                goto cleanup_transaction;
 
-       btrfs_wait_delalloc_flush(fs_info);
+       btrfs_wait_delalloc_flush(trans);
 
        btrfs_scrub_pause(fs_info);
        /*
index f06454a55e00cb4df0f71f03eb0013adbae1e4f4..561884f60d35c36e11928e28e5007901fc695198 100644 (file)
@@ -3578,9 +3578,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
        }
        btrfs_release_path(path);
 
-       /* find the first key from this transaction again */
+       /*
+        * Find the first key from this transaction again.  See the note for
+        * log_new_dir_dentries, if we're logging a directory recursively we
+        * won't be holding its i_mutex, which means we can modify the directory
+        * while we're logging it.  If we remove an entry between our first
+        * search and this search we'll not find the key again and can just
+        * bail.
+        */
        ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
-       if (WARN_ON(ret != 0))
+       if (ret != 0)
                goto done;
 
        /*
@@ -4544,6 +4551,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_inode_item);
                *size_ret = btrfs_inode_size(path->nodes[0], item);
+               /*
+                * If the in-memory inode's i_size is smaller than the inode
+                * size stored in the btree, return the inode's i_size, so
+                * that we get a correct inode size after replaying the log
+                * when before a power failure we had a shrinking truncate
+                * followed by addition of a new name (rename / new hard link).
+                * Otherwise return the inode size from the btree, to avoid
+                * data loss when replaying a log due to previously doing a
+                * write that expands the inode's size and logging a new name
+                * immediately after.
+                */
+               if (*size_ret > inode->vfs_inode.i_size)
+                       *size_ret = inode->vfs_inode.i_size;
        }
 
        btrfs_release_path(path);
@@ -4705,15 +4725,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
                                        struct btrfs_file_extent_item);
 
                if (btrfs_file_extent_type(leaf, extent) ==
-                   BTRFS_FILE_EXTENT_INLINE) {
-                       len = btrfs_file_extent_ram_bytes(leaf, extent);
-                       ASSERT(len == i_size ||
-                              (len == fs_info->sectorsize &&
-                               btrfs_file_extent_compression(leaf, extent) !=
-                               BTRFS_COMPRESS_NONE) ||
-                              (len < i_size && i_size < fs_info->sectorsize));
+                   BTRFS_FILE_EXTENT_INLINE)
                        return 0;
-               }
 
                len = btrfs_file_extent_num_bytes(leaf, extent);
                /* Last extent goes beyond i_size, no need to log a hole. */
index 9024eee889b9838caa2799ca51f439106a955ff0..db934ceae9c109f39eade85457a25b8ce99604ae 100644 (file)
@@ -6407,7 +6407,7 @@ static void btrfs_end_bio(struct bio *bio)
                                if (bio_op(bio) == REQ_OP_WRITE)
                                        btrfs_dev_stat_inc_and_print(dev,
                                                BTRFS_DEV_STAT_WRITE_ERRS);
-                               else
+                               else if (!(bio->bi_opf & REQ_RAHEAD))
                                        btrfs_dev_stat_inc_and_print(dev,
                                                BTRFS_DEV_STAT_READ_ERRS);
                                if (bio->bi_opf & REQ_PREFLUSH)
index e3346628efe2e221c844db3af6b4336d07b4f7f1..2d61ddda9bf5653fb559fb320422fd84ec470419 100644 (file)
@@ -524,6 +524,7 @@ static void ceph_i_callback(struct rcu_head *head)
        struct inode *inode = container_of(head, struct inode, i_rcu);
        struct ceph_inode_info *ci = ceph_inode(inode);
 
+       kfree(ci->i_symlink);
        kmem_cache_free(ceph_inode_cachep, ci);
 }
 
@@ -566,7 +567,6 @@ void ceph_destroy_inode(struct inode *inode)
                }
        }
 
-       kfree(ci->i_symlink);
        while ((n = rb_first(&ci->i_fragtree)) != NULL) {
                frag = rb_entry(n, struct ceph_inode_frag, node);
                rb_erase(n, &ci->i_fragtree);
index 842e8f749db64eb6ee17297e1039bb7e2c4ea2b7..570d71043acf982976d3098cf5e4beaee7532241 100644 (file)
@@ -410,7 +410,7 @@ bool fs_validate_description(const struct fs_parameter_description *desc)
                        for (param = desc->specs; param->name; param++) {
                                if (param->opt == e->opt &&
                                    param->type != fs_param_is_enum) {
-                                       pr_err("VALIDATE %s: e[%lu] enum val for %s\n",
+                                       pr_err("VALIDATE %s: e[%tu] enum val for %s\n",
                                               name, e - desc->enums, param->name);
                                        good = false;
                                }
index 6aaa30580a2b2057fca13404a44e0b3773450288..bbdbd56cf2ac9384b83e78945b2c6c0031cc346d 100644 (file)
@@ -1022,6 +1022,8 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
 
        ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
        if (!ret) {
+               ssize_t ret2;
+
                /*
                 * Open-code file_start_write here to grab freeze protection,
                 * which will be released by another thread in
@@ -1036,7 +1038,19 @@ static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
                                                SB_FREEZE_WRITE);
                }
                kiocb->ki_flags |= IOCB_WRITE;
-               io_rw_done(kiocb, call_write_iter(file, kiocb, &iter));
+
+               ret2 = call_write_iter(file, kiocb, &iter);
+               if (!force_nonblock || ret2 != -EAGAIN) {
+                       io_rw_done(kiocb, ret2);
+               } else {
+                       /*
+                        * If ->needs_lock is true, we're already in async
+                        * context.
+                        */
+                       if (!s->needs_lock)
+                               io_async_list_note(WRITE, req, iov_count);
+                       ret = -EAGAIN;
+               }
        }
 out_free:
        kfree(iovec);
@@ -1968,7 +1982,15 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
                return 0;
 
        if (sig) {
-               ret = set_user_sigmask(sig, &ksigmask, &sigsaved, sigsz);
+#ifdef CONFIG_COMPAT
+               if (in_compat_syscall())
+                       ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
+                                                     &ksigmask, &sigsaved, sigsz);
+               else
+#endif
+                       ret = set_user_sigmask(sig, &ksigmask,
+                                              &sigsaved, sigsz);
+
                if (ret)
                        return ret;
        }
index 93fb7cf0b92b631358cf36eab60d5947cc0312a7..f0b5c987d6ae14cc39a281668d5daf4d658cbe67 100644 (file)
@@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
 
        WARN_ON_ONCE(host->h_server);
 
-       if (refcount_dec_and_test(&host->h_count)) {
+       if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
                WARN_ON_ONCE(!list_empty(&host->h_lockowners));
                WARN_ON_ONCE(!list_empty(&host->h_granted));
                WARN_ON_ONCE(!list_empty(&host->h_reclaim));
 
-               mutex_lock(&nlm_host_mutex);
                nlm_destroy_host_locked(host);
                mutex_unlock(&nlm_host_mutex);
        }
index eaa1cfaf73b08c8fda256ea57ab816dcc4985217..71d0c6c2aac5ccde4d69cdf7396fbdbdfb40c3e6 100644 (file)
@@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
                         */
                        error = -EDEADLK;
                        spin_lock(&blocked_lock_lock);
+                       /*
+                        * Ensure that we don't find any locks blocked on this
+                        * request during deadlock detection.
+                        */
+                       __locks_wake_up_blocks(request);
                        if (likely(!posix_locks_deadlock(request, fl))) {
                                error = FILE_LOCK_DEFERRED;
                                __locks_insert_block(fl, request,
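
The __locks_wake_up_blocks() call added above removes any waiters already blocked on this request before the deadlock check runs, so they cannot contribute a stale edge to the waits-for chain. A simplified sketch of the cycle walk that posix_locks_deadlock() performs, using an invented blocked_on array rather than the kernel's lock structures:

#include <stdio.h>

#define NOBODY -1

/* blocked_on[i] is the owner whose lock owner i is currently waiting for. */
static int blocked_on[4] = { NOBODY, NOBODY, NOBODY, NOBODY };

/* Follow the chain from the owner of the wanted lock; if it leads back to
 * the requester, granting the wait would close a cycle, i.e. a deadlock. */
static int would_deadlock(int requester, int owner_of_wanted_lock)
{
	int cur = owner_of_wanted_lock;

	while (cur != NOBODY) {
		if (cur == requester)
			return 1;
		cur = blocked_on[cur];
	}
	return 0;
}

int main(void)
{
	blocked_on[1] = 0;                      /* owner 1 waits on owner 0 */
	printf("%d\n", would_deadlock(0, 1));   /* 1: 0 -> 1 -> 0 is a cycle */
	printf("%d\n", would_deadlock(2, 1));   /* 0: chain ends at owner 0  */
	return 0;
}
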
index fb1cf1a4bda2a105e60cb23d95dea4b3abc09f70..90d71fda65cecfb3958cc4391240e2a09bac783e 100644 (file)
@@ -453,7 +453,7 @@ void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
        case XPRT_TRANSPORT_RDMA:
                if (retrans == NFS_UNSPEC_RETRANS)
                        to->to_retries = NFS_DEF_TCP_RETRANS;
-               if (timeo == NFS_UNSPEC_TIMEO || to->to_retries == 0)
+               if (timeo == NFS_UNSPEC_TIMEO || to->to_initval == 0)
                        to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
                if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
                        to->to_initval = NFS_MAX_TCP_TIMEOUT;
index f9264e1922a28b836367b145c215d9ceb8883843..6673d4ff5a2a846c01e2de3e909e167da30156cb 100644 (file)
@@ -1289,6 +1289,7 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg,
 static int ff_layout_read_done_cb(struct rpc_task *task,
                                struct nfs_pgio_header *hdr)
 {
+       int new_idx = hdr->pgio_mirror_idx;
        int err;
 
        trace_nfs4_pnfs_read(hdr, task->tk_status);
@@ -1307,7 +1308,7 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
        case -NFS4ERR_RESET_TO_PNFS:
                if (ff_layout_choose_best_ds_for_read(hdr->lseg,
                                        hdr->pgio_mirror_idx + 1,
-                                       &hdr->pgio_mirror_idx))
+                                       &new_idx))
                        goto out_layouterror;
                set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
                return task->tk_status;
@@ -1320,7 +1321,9 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
 
        return 0;
 out_layouterror:
+       ff_layout_read_record_layoutstats_done(task, hdr);
        ff_layout_send_layouterror(hdr->lseg);
+       hdr->pgio_mirror_idx = new_idx;
 out_eagain:
        rpc_restart_call_prepare(task);
        return -EAGAIN;
index 4dbb0ee234324db3275de7c7a26fc3bcd040171a..741ff8c9c6ed3f7cda214ec0157eb6d9461ebdca 100644 (file)
@@ -2933,7 +2933,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
        }
 
 out:
-       nfs4_sequence_free_slot(&opendata->o_res.seq_res);
+       if (!opendata->cancelled)
+               nfs4_sequence_free_slot(&opendata->o_res.seq_res);
        return ret;
 }
 
@@ -6301,7 +6302,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
        p->arg.seqid = seqid;
        p->res.seqid = seqid;
        p->lsp = lsp;
-       refcount_inc(&lsp->ls_count);
        /* Ensure we don't close file until we're done freeing locks! */
        p->ctx = get_nfs_open_context(ctx);
        p->l_ctx = nfs_get_lock_context(ctx);
@@ -6526,7 +6526,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
        p->res.lock_seqid = p->arg.lock_seqid;
        p->lsp = lsp;
        p->server = server;
-       refcount_inc(&lsp->ls_count);
        p->ctx = get_nfs_open_context(ctx);
        locks_init_lock(&p->fl);
        locks_copy_lock(&p->fl, fl);
index a35259eebc56739b59bf7ffb5029e647dc11ad0f..1dc9a08e8bdc7b7c96a5668072faddf75b0285c2 100644 (file)
@@ -4719,22 +4719,23 @@ out:
 
 /* Lock an inode and grab a bh pointing to the inode. */
 int ocfs2_reflink_inodes_lock(struct inode *s_inode,
-                             struct buffer_head **bh1,
+                             struct buffer_head **bh_s,
                              struct inode *t_inode,
-                             struct buffer_head **bh2)
+                             struct buffer_head **bh_t)
 {
-       struct inode *inode1;
-       struct inode *inode2;
+       struct inode *inode1 = s_inode;
+       struct inode *inode2 = t_inode;
        struct ocfs2_inode_info *oi1;
        struct ocfs2_inode_info *oi2;
+       struct buffer_head *bh1 = NULL;
+       struct buffer_head *bh2 = NULL;
        bool same_inode = (s_inode == t_inode);
+       bool need_swap = (inode1->i_ino > inode2->i_ino);
        int status;
 
        /* First grab the VFS and rw locks. */
        lock_two_nondirectories(s_inode, t_inode);
-       inode1 = s_inode;
-       inode2 = t_inode;
-       if (inode1->i_ino > inode2->i_ino)
+       if (need_swap)
                swap(inode1, inode2);
 
        status = ocfs2_rw_lock(inode1, 1);
@@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
        trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
                                (unsigned long long)oi2->ip_blkno);
 
-       if (*bh1)
-               *bh1 = NULL;
-       if (*bh2)
-               *bh2 = NULL;
-
        /* We always want to lock the one with the lower lockid first. */
        if (oi1->ip_blkno > oi2->ip_blkno)
                mlog_errno(-ENOLCK);
 
        /* lock id1 */
-       status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
+       status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
+                                        OI_LS_REFLINK_TARGET);
        if (status < 0) {
                if (status != -ENOENT)
                        mlog_errno(status);
@@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
        /* lock id2 */
        if (!same_inode) {
-               status = ocfs2_inode_lock_nested(inode2, bh2, 1,
+               status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
                                                 OI_LS_REFLINK_TARGET);
                if (status < 0) {
                        if (status != -ENOENT)
                                mlog_errno(status);
                        goto out_cl1;
                }
-       } else
-               *bh2 = *bh1;
+       } else {
+               bh2 = bh1;
+       }
+
+       /*
+        * If we swapped inode order above, we have to swap the buffer heads
+        * before passing them back to the caller.
+        */
+       if (need_swap)
+               swap(bh1, bh2);
+       *bh_s = bh1;
+       *bh_t = bh2;
 
        trace_ocfs2_double_lock_end(
                        (unsigned long long)oi1->ip_blkno,
@@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
 
 out_cl1:
        ocfs2_inode_unlock(inode1, 1);
-       brelse(*bh1);
-       *bh1 = NULL;
+       brelse(bh1);
 out_rw2:
        ocfs2_rw_unlock(inode2, 1);
 out_i2:
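
The rework above keeps the inode-order decision in need_swap, acquires the cluster locks on local buffer heads, and swaps them back before assigning *bh_s and *bh_t, so the caller always gets buffers matching s_inode and t_inode. A userspace analogue of the acquire-in-stable-order, return-in-caller-order idiom (names invented for illustration, not the ocfs2 API):

#include <pthread.h>
#include <stdio.h>

struct obj {
	unsigned long id;               /* stands in for ip_blkno / i_ino */
	pthread_mutex_t lock;
};

/* Lock the lower-id object first to avoid ABBA deadlocks, then hand the
 * results back in the caller's (s, t) order, like the bh1/bh2 swap above. */
static void lock_pair(struct obj *s, struct obj *t,
		      struct obj **locked_s, struct obj **locked_t)
{
	struct obj *a = s, *b = t;
	int need_swap = (a->id > b->id);

	if (need_swap) {
		struct obj *tmp = a; a = b; b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	if (a != b)
		pthread_mutex_lock(&b->lock);
	if (need_swap) {
		struct obj *tmp = a; a = b; b = tmp;
	}
	*locked_s = a;
	*locked_t = b;
}

int main(void)
{
	struct obj x = { .id = 7, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct obj y = { .id = 3, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct obj *ls, *lt;

	lock_pair(&x, &y, &ls, &lt);
	printf("caller order preserved: %d\n", ls == &x && lt == &y); /* 1 */
	pthread_mutex_unlock(&lt->lock);
	if (ls != lt)
		pthread_mutex_unlock(&ls->lock);
	return 0;
}
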
index 0285ce7dbd515c8c7bfd9e63f0211cabfb818801..f1c2f855fd43c7664a739c2172f83be2f93944e4 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
                return 0;
        }
 
+       /* Any file opened for execve()/uselib() has to be a regular file. */
+       if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
+               error = -EACCES;
+               goto cleanup_file;
+       }
+
        if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
                error = get_write_access(inode);
                if (unlikely(error))
index d29d869abec17c3b6d4b56d6500e9b3d510cd385..f5834488b67d564d91b5534c210fc7fcd8e95969 100644 (file)
@@ -615,7 +615,7 @@ static void __init proc_kcore_text_init(void)
 /*
  * MODULES_VADDR has no intersection with VMALLOC_ADDR.
  */
-struct kcore_list kcore_modules;
+static struct kcore_list kcore_modules;
 static void __init add_modules_range(void)
 {
        if (MODULES_VADDR != VMALLOC_START && MODULES_END != VMALLOC_END) {
index 4d598a399bbff1b32becb1cf24406f9b9e80287c..d653907275419435e4bad20de2f1a704b5c9d6c3 100644 (file)
@@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
        if (--header->nreg)
                return;
 
-       put_links(header);
+       if (parent)
+               put_links(header);
        start_unregistering(header);
        if (!--header->count)
                kfree_rcu(header, rcu);
index 48502cb9990f184a55b780372adaef3bda406509..4637ae1ae91ca8ef6007c05ba060dd9fb208fdf1 100644 (file)
@@ -1191,7 +1191,10 @@ xfs_iread_extents(
         * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
         */
        level = be16_to_cpu(block->bb_level);
-       ASSERT(level > 0);
+       if (unlikely(level == 0)) {
+               XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+               return -EFSCORRUPTED;
+       }
        pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
        bno = be64_to_cpu(*pp);
 
@@ -4249,9 +4252,13 @@ xfs_bmapi_write(
        struct xfs_bmbt_irec    *mval,          /* output: map values */
        int                     *nmap)          /* i/o: mval size/count */
 {
+       struct xfs_bmalloca     bma = {
+               .tp             = tp,
+               .ip             = ip,
+               .total          = total,
+       };
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_ifork        *ifp;
-       struct xfs_bmalloca     bma = { NULL }; /* args for xfs_bmap_alloc */
        xfs_fileoff_t           end;            /* end of mapped file region */
        bool                    eof = false;    /* after the end of extents */
        int                     error;          /* error return */
@@ -4319,10 +4326,6 @@ xfs_bmapi_write(
                eof = true;
        if (!xfs_iext_peek_prev_extent(ifp, &bma.icur, &bma.prev))
                bma.prev.br_startoff = NULLFILEOFF;
-       bma.tp = tp;
-       bma.ip = ip;
-       bma.total = total;
-       bma.datatype = 0;
        bma.minleft = xfs_bmapi_minleft(tp, ip, whichfork);
 
        n = 0;
index 6f94d1f7322d0a33bd00134c8136ac3831b4fe2c..117910db51b809ebeea0196182e05f0dd0c54611 100644 (file)
@@ -415,8 +415,17 @@ xchk_btree_check_owner(
        struct xfs_btree_cur    *cur = bs->cur;
        struct check_owner      *co;
 
-       if ((cur->bc_flags & XFS_BTREE_ROOT_IN_INODE) && bp == NULL)
+       /*
+        * In theory, xfs_btree_get_block should only give us a null buffer
+        * pointer for the root of a root-in-inode btree type, but we need
+        * to check defensively here in case the cursor state is also screwed
+        * up.
+        */
+       if (bp == NULL) {
+               if (!(cur->bc_flags & XFS_BTREE_ROOT_IN_INODE))
+                       xchk_btree_set_corrupt(bs->sc, bs->cur, level);
                return 0;
+       }
 
        /*
         * We want to cross-reference each btree block with the bnobt
index f1260b4bfdeed62440cc238138e7fd405c2dccf1..90527b094878971f831c78daafe2483dd99e83d2 100644 (file)
@@ -574,6 +574,11 @@ xchk_da_btree(
                /* Drill another level deeper. */
                blkno = be32_to_cpu(key->before);
                level++;
+               if (level >= XFS_DA_NODE_MAXDEPTH) {
+                       /* Too deep! */
+                       xchk_da_set_corrupt(&ds, level - 1);
+                       break;
+               }
                ds.tree_level--;
                error = xchk_da_btree_block(&ds, level, blkno);
                if (error)
index 93f07edafd8183a14ca55fae7ffdfad0370ccc89..9ee2a7d02e7059f29c103da1088b7c854ca023bb 100644 (file)
@@ -161,6 +161,14 @@ xfs_ioc_trim(
                return -EPERM;
        if (!blk_queue_discard(q))
                return -EOPNOTSUPP;
+
+       /*
+        * We haven't recovered the log, so we cannot use our bnobt-guided
+        * storage zapping commands.
+        */
+       if (mp->m_flags & XFS_MOUNT_NORECOVERY)
+               return -EROFS;
+
        if (copy_from_user(&range, urange, sizeof(range)))
                return -EFAULT;
 
index 1f2e2845eb76c2c78a932c913057e1028cec2f05..a7ceae90110eded646f13acf4314131574d46a69 100644 (file)
@@ -529,18 +529,17 @@ xfs_file_dio_aio_write(
        count = iov_iter_count(from);
 
        /*
-        * If we are doing unaligned IO, wait for all other IO to drain,
-        * otherwise demote the lock if we had to take the exclusive lock
-        * for other reasons in xfs_file_aio_write_checks.
+        * If we are doing unaligned IO, we can't allow any other overlapping IO
+        * in-flight at the same time or we risk data corruption. Wait for all
+        * other IO to drain before we submit. If the IO is aligned, demote the
+        * iolock if we had to take the exclusive lock in
+        * xfs_file_aio_write_checks() for other reasons.
         */
        if (unaligned_io) {
-               /* If we are going to wait for other DIO to finish, bail */
-               if (iocb->ki_flags & IOCB_NOWAIT) {
-                       if (atomic_read(&inode->i_dio_count))
-                               return -EAGAIN;
-               } else {
-                       inode_dio_wait(inode);
-               }
+               /* unaligned dio always waits, bail */
+               if (iocb->ki_flags & IOCB_NOWAIT)
+                       return -EAGAIN;
+               inode_dio_wait(inode);
        } else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
                iolock = XFS_IOLOCK_SHARED;
@@ -548,6 +547,14 @@ xfs_file_dio_aio_write(
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos);
        ret = iomap_dio_rw(iocb, from, &xfs_iomap_ops, xfs_dio_write_end_io);
+
+       /*
+        * If unaligned, this is the only IO in-flight. If it has not yet
+        * completed, wait on it before we release the iolock to prevent
+        * subsequent overlapping IO.
+        */
+       if (ret == -EIOCBQUEUED && unaligned_io)
+               inode_dio_wait(inode);
 out:
        xfs_iunlock(ip, iolock);
 
index 30b1ae53689fcffcb6a7a0b20437745c6cebd43c..c50542dc71e0b24912571eaa09039a98d3a6ef9f 100644 (file)
 
 /* Defaults for debug_level, debug and normal */
 
+#ifndef ACPI_DEBUG_DEFAULT
 #define ACPI_DEBUG_DEFAULT          (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_EVALUATION | ACPI_LV_REPAIR)
+#endif
+
 #define ACPI_NORMAL_DEFAULT         (ACPI_LV_INIT | ACPI_LV_DEBUG_OBJECT | ACPI_LV_REPAIR)
 #define ACPI_DEBUG_ALL              (ACPI_LV_AML_DISASSEMBLE | ACPI_LV_ALL_EXCEPTIONS | ACPI_LV_ALL)
 
index 9ff328fd946a2b8ef332b6d13bdfc07ce403f5e4..624b90b340852c65104d8de894fb7d9a5a555f53 100644 (file)
 #define ACPI_NO_ERROR_MESSAGES
 #undef ACPI_DEBUG_OUTPUT
 
+/* Use a specific debugging default separate from ACPICA */
+
+#undef ACPI_DEBUG_DEFAULT
+#define ACPI_DEBUG_DEFAULT          (ACPI_LV_INFO | ACPI_LV_REPAIR)
+
 /* External interface for __KERNEL__, stub is needed */
 
 #define ACPI_EXTERNAL_RETURN_STATUS(prototype) \
index d5cfc0b15b7640e6c4b99fc05b479f25f5df7f44..f6034ba774be313a86f006ee2e0fbe96403de6f6 100644 (file)
@@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb)
 #define AARP_RESOLVE_TIME      (10 * HZ)
 
 extern struct datalink_proto *ddp_dl, *aarp_dl;
-extern void aarp_proto_init(void);
+extern int aarp_proto_init(void);
 
 /* Inter module exports */
 
index a2132e09dc1c422731e9533a2a2efa2f5b0132dd..f02367faa58dbe44171454de6af50777ddc0ebf3 100644 (file)
@@ -193,7 +193,6 @@ enum bpf_arg_type {
 
        ARG_PTR_TO_CTX,         /* pointer to context */
        ARG_ANYTHING,           /* any (initialized) argument is ok */
-       ARG_PTR_TO_SOCKET,      /* pointer to bpf_sock */
        ARG_PTR_TO_SPIN_LOCK,   /* pointer to bpf_spin_lock */
        ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */
 };
index 69f7a3449eda83a8a25fd1f5ac3dedc36108deba..7d8228d1c8981d9b73fb72a8953687c1f550eb19 100644 (file)
@@ -66,6 +66,46 @@ struct bpf_reg_state {
         * same reference to the socket, to determine proper reference freeing.
         */
        u32 id;
+       /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
+        * from a pointer-cast helper, bpf_sk_fullsock() and
+        * bpf_tcp_sock().
+        *
+        * Consider the following where "sk" is a reference counted
+        * pointer returned from "sk = bpf_sk_lookup_tcp();":
+        *
+        * 1: sk = bpf_sk_lookup_tcp();
+        * 2: if (!sk) { return 0; }
+        * 3: fullsock = bpf_sk_fullsock(sk);
+        * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
+        * 5: tp = bpf_tcp_sock(fullsock);
+        * 6: if (!tp) { bpf_sk_release(sk); return 0; }
+        * 7: bpf_sk_release(sk);
+        * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
+        *
+        * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and
+        * "tp" ptr should be invalidated also.  In order to do that,
+        * the reg holding "fullsock" and "sk" need to remember
+        * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id
+        * such that the verifier can reset all regs which have
+        * ref_obj_id matching the sk_reg->id.
+        *
+        * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
+        * sk_reg->id will stay as NULL-marking purpose only.
+        * After NULL-marking is done, sk_reg->id can be reset to 0.
+        *
+        * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
+        * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
+        *
+        * After "tp = bpf_tcp_sock(fullsock);" at line 5,
+        * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id
+        * which is the same as sk_reg->ref_obj_id.
+        *
+        * From the verifier perspective, if sk, fullsock and tp
+        * are not NULL, they are the same ptr with different
+        * reg->type.  In particular, bpf_sk_release(tp) is also
+        * allowed and has the same effect as bpf_sk_release(sk).
+        */
+       u32 ref_obj_id;
        /* For scalar types (SCALAR_VALUE), this represents our knowledge of
         * the actual value.
         * For pointer types, this represents the variable part of the offset
index 9cd00a37b8d32b83e3539ed5ffce694c63efc8f0..6db2d9a6e503106261e042e5d65fdc68aa92194b 100644 (file)
 #define BCM_LED_SRC_OFF                0xe     /* Tied high */
 #define BCM_LED_SRC_ON         0xf     /* Tied low */
 
+/*
+ * Broadcom Multicolor LED configurations (expansion register 4)
+ */
+#define BCM_EXP_MULTICOLOR             (MII_BCM54XX_EXP_SEL_ER + 0x04)
+#define BCM_LED_MULTICOLOR_IN_PHASE    BIT(8)
+#define BCM_LED_MULTICOLOR_LINK_ACT    0x0
+#define BCM_LED_MULTICOLOR_SPEED       0x1
+#define BCM_LED_MULTICOLOR_ACT_FLASH   0x2
+#define BCM_LED_MULTICOLOR_FDX         0x3
+#define BCM_LED_MULTICOLOR_OFF         0x4
+#define BCM_LED_MULTICOLOR_ON          0x5
+#define BCM_LED_MULTICOLOR_ALT         0x6
+#define BCM_LED_MULTICOLOR_FLASH       0x7
+#define BCM_LED_MULTICOLOR_LINK                0x8
+#define BCM_LED_MULTICOLOR_ACT         0x9
+#define BCM_LED_MULTICOLOR_PROGRAM     0xa
 
 /*
  * BCM5482: Shadow registers
index b425a7ee04ce4d2a56f4ed951ae4c7f6779818fd..4e6987e11f688bc12001cc4d030e8a3a99faa514 100644 (file)
@@ -49,8 +49,6 @@ struct bus_attribute {
        ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
 };
 
-#define BUS_ATTR(_name, _mode, _show, _store)  \
-       struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
 #define BUS_ATTR_RW(_name) \
        struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
 #define BUS_ATTR_RO(_name) \
index ea35263eb76b76e796f2f3ffaa3378585b98db82..11943b60f2084cb5e69c6dd5553f2adf0ea4f8ca 100644 (file)
@@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void)
 #define pud_huge(x)    0
 #define is_hugepage_only_range(mm, addr, len)  0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_fault(mm, vma, addr, flags)    ({ BUG(); 0; })
 #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
                                src_addr, pagep)        ({ BUG(); 0; })
 #define huge_pte_offset(mm, address, sz)       0
@@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 {
        BUG();
 }
+static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
+                               struct vm_area_struct *vma, unsigned long address,
+                               unsigned int flags)
+{
+       BUG();
+       return 0;
+}
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 /*
index c843f4a9c512588edc333075cdc3856dc9a582d3..da676cdbd7277e32feb96a56063167afc0964215 100644 (file)
@@ -38,12 +38,6 @@ struct vmcoredd_node {
 
 #ifdef CONFIG_PROC_KCORE
 void __init kclist_add(struct kcore_list *, void *, size_t, int type);
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-       m->vaddr = (unsigned long)vaddr;
-       kclist_add(m, addr, sz, KCORE_REMAP);
-}
 
 extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn));
 #else
@@ -51,11 +45,6 @@ static inline
 void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
 {
 }
-
-static inline
-void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz)
-{
-}
 #endif
 
 #endif /* _LINUX_KCORE_H */
index 79626b5ab36cce2492a406c3ff4f50023c4c2150..58aa3adf94e63585631876b9a880c454fbd905df 100644 (file)
@@ -207,7 +207,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
 }
 
 /**
- * list_is_first -- tests whether @ list is the first entry in list @head
+ * list_is_first -- tests whether @list is the first entry in list @head
  * @list: the entry to test
  * @head: the head of the list
  */
index 651fca72286c4838307b8fe83d78bbda2dc81015..c606c72311d0e08564b274fba3aaa2d2cb3a6e7c 100644 (file)
@@ -83,6 +83,12 @@ enum sock_type {
 
 #endif /* ARCH_HAS_SOCKET_TYPES */
 
+/**
+ * enum sock_shutdown_cmd - Shutdown types
+ * @SHUT_RD: shutdown receptions
+ * @SHUT_WR: shutdown transmissions
+ * @SHUT_RDWR: shutdown receptions/transmissions
+ */
 enum sock_shutdown_cmd {
        SHUT_RD,
        SHUT_WR,
index 4eb26d2780460a6a821f110ce903ad2a1a54ccce..280ae96dc4c300d29418ececeb5e9b91acf27e2e 100644 (file)
@@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
 
 /*
  * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
- * If specified range includes migrate types other than MOVABLE or CMA,
- * this will fail with -EBUSY.
- *
- * For isolating all pages in the range finally, the caller have to
- * free all pages in the range. test_page_isolated() can be used for
- * test it.
- *
- * The following flags are allowed (they can be combined in a bit mask)
- * SKIP_HWPOISON - ignore hwpoison pages
- * REPORT_FAILURE - report details about the failure to isolate the range
  */
 int
 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
index f41f1d041e2c5e325e4d817781e13ed60695276c..397607a0c0ebef2969a3c4ea81893be860c8aacc 100644 (file)
@@ -460,7 +460,6 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
                                              void *, size_t, int);
 
 /* IEEE1284.3 functions */
-#define daisy_dev_name "Device ID probe"
 extern int parport_daisy_init (struct parport *port);
 extern void parport_daisy_fini (struct parport *port);
 extern struct pardevice *parport_open (int devnum, const char *name);
@@ -469,18 +468,6 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len);
 extern void parport_daisy_deselect_all (struct parport *port);
 extern int parport_daisy_select (struct parport *port, int daisy, int mode);
 
-#ifdef CONFIG_PARPORT_1284
-extern int daisy_drv_init(void);
-extern void daisy_drv_exit(void);
-#else
-static inline int daisy_drv_init(void)
-{
-       return 0;
-}
-
-static inline void daisy_drv_exit(void) {}
-#endif
-
 /* Lowlevel drivers _can_ call this support function to handle irqs.  */
 static inline void parport_generic_irq(struct parport *port)
 {
index a867637e172d75cbe77a6593309c30cb82a1d197..9e46678edb2aff1e5f0a0d2247e969829551adca 100644 (file)
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL+ */
+/* SPDX-License-Identifier: GPL-2.0+ */
 
 /*
  * AMD FCH gpio driver platform-data
index ae56551976983dc508d3f3d63903d3a284a5f3a4..e412c092c1e821edd18f0c9dc0ed734ced5d6234 100644 (file)
@@ -418,10 +418,20 @@ static inline void set_restore_sigmask(void)
        set_thread_flag(TIF_RESTORE_SIGMASK);
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
+
 static inline void clear_restore_sigmask(void)
 {
        clear_thread_flag(TIF_RESTORE_SIGMASK);
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK);
+}
 static inline bool test_restore_sigmask(void)
 {
        return test_thread_flag(TIF_RESTORE_SIGMASK);
@@ -439,6 +449,10 @@ static inline void set_restore_sigmask(void)
        current->restore_sigmask = true;
        WARN_ON(!test_thread_flag(TIF_SIGPENDING));
 }
+static inline void clear_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       tsk->restore_sigmask = false;
+}
 static inline void clear_restore_sigmask(void)
 {
        current->restore_sigmask = false;
@@ -447,6 +461,10 @@ static inline bool test_restore_sigmask(void)
 {
        return current->restore_sigmask;
 }
+static inline bool test_tsk_restore_sigmask(struct task_struct *tsk)
+{
+       return tsk->restore_sigmask;
+}
 static inline bool test_and_clear_restore_sigmask(void)
 {
        if (!current->restore_sigmask)
index 11b45f7ae4057c3b70105974b8f527dd6f30d8ba..9449b19c5f107a73bfe7eca9fe875708862dc3b9 100644 (file)
@@ -32,6 +32,8 @@
 #define SLAB_HWCACHE_ALIGN     ((slab_flags_t __force)0x00002000U)
 /* Use GFP_DMA memory */
 #define SLAB_CACHE_DMA         ((slab_flags_t __force)0x00004000U)
+/* Use GFP_DMA32 memory */
+#define SLAB_CACHE_DMA32       ((slab_flags_t __force)0x00008000U)
 /* DEBUG: Store the last owner for bug hunting */
 #define SLAB_STORE_USER                ((slab_flags_t __force)0x00010000U)
 /* Panic if kmem_cache_create() fails */
index 6016daeecee41f28511f46c8618fbf05a16aff8a..b57cd8bf96e2b67c6588716cdfbd92dba58d7b0f 100644 (file)
@@ -26,7 +26,7 @@ typedef __kernel_sa_family_t  sa_family_t;
 /*
  *     1003.1g requires sa_family_t and that sa_data is char.
  */
+
 struct sockaddr {
        sa_family_t     sa_family;      /* address family, AF_xxx       */
        char            sa_data[14];    /* 14 bytes of protocol address */
@@ -44,7 +44,7 @@ struct linger {
  *     system, not 4.3. Thus msg_accrights(len) are now missing. They
  *     belong in an obscure libc emulation or the bin.
  */
+
 struct msghdr {
        void            *msg_name;      /* ptr to socket address structure */
        int             msg_namelen;    /* size of socket address structure */
@@ -54,7 +54,7 @@ struct msghdr {
        unsigned int    msg_flags;      /* flags on received message */
        struct kiocb    *msg_iocb;      /* ptr to iocb for async requests */
 };
+
 struct user_msghdr {
        void            __user *msg_name;       /* ptr to socket address structure */
        int             msg_namelen;            /* size of socket address structure */
@@ -122,7 +122,7 @@ struct cmsghdr {
  *     inside range, given by msg->msg_controllen before using
  *     ancillary object DATA.                          --ANK (980731)
  */
+
 static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size,
                                               struct cmsghdr *__cmsg)
 {
@@ -264,10 +264,10 @@ struct ucred {
 /* Maximum queue length specifiable by listen.  */
 #define SOMAXCONN      128
 
-/* Flags we can use with send/ and recv. 
+/* Flags we can use with send/ and recv.
    Added those for 1003.1g not all are supported yet
  */
+
 #define MSG_OOB                1
 #define MSG_PEEK       2
 #define MSG_DONTROUTE  4
index a240ed2a0372c20281e03a45fe49dea6a2fd60a3..ff56c443180cd6d35ec6f354ea2d519ce5443b83 100644 (file)
@@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
 #define vbg_debug pr_debug
 #endif
 
-int vbg_hgcm_connect(struct vbg_dev *gdev,
+int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor,
                     struct vmmdev_hgcm_service_location *loc,
                     u32 *client_id, int *vbox_status);
 
-int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status);
+int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor,
+                       u32 client_id, int *vbox_status);
 
-int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
-                 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
-                 u32 parm_count, int *vbox_status);
+int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id,
+                 u32 function, u32 timeout_ms,
+                 struct vmmdev_hgcm_function_parameter *parms, u32 parm_count,
+                 int *vbox_status);
 
 /**
  * Convert a VirtualBox status code to a standard Linux kernel return value.
index c745e9ccfab2d6f86a58c1a038e416fe29a63d67..c61a1bf4e3de544dd41886e9126b7ff20eb829c3 100644 (file)
@@ -39,7 +39,7 @@ struct tc_action {
        struct gnet_stats_basic_cpu __percpu *cpu_bstats_hw;
        struct gnet_stats_queue __percpu *cpu_qstats;
        struct tc_cookie        __rcu *act_cookie;
-       struct tcf_chain        *goto_chain;
+       struct tcf_chain        __rcu *goto_chain;
 };
 #define tcf_index      common.tcfa_index
 #define tcf_refcnt     common.tcfa_refcnt
@@ -90,7 +90,7 @@ struct tc_action_ops {
        int     (*lookup)(struct net *net, struct tc_action **a, u32 index);
        int     (*init)(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act, int ovr,
-                       int bind, bool rtnl_held,
+                       int bind, bool rtnl_held, struct tcf_proto *tp,
                        struct netlink_ext_ack *extack);
        int     (*walk)(struct net *, struct sk_buff *,
                        struct netlink_callback *, int,
@@ -181,6 +181,11 @@ int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
 int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
 
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+                            struct tcf_chain **handle,
+                            struct netlink_ext_ack *newchain);
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+                                        struct tcf_chain *newchain);
 #endif /* CONFIG_NET_CLS_ACT */
 
 static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
index 31284c078d06b6bb426fec08e4776a1e67ada41b..7d1a0483a17ba01b94643fd78acd72095a2e0adb 100644 (file)
@@ -378,6 +378,7 @@ struct tcf_chain {
        bool flushing;
        const struct tcf_proto_ops *tmplt_ops;
        void *tmplt_priv;
+       struct rcu_head rcu;
 };
 
 struct tcf_block {
index 32ee65a30aff1146dcafcc533e73833e190cb887..1c6e6c0766ca09b771d865883c7c4daf390215c6 100644 (file)
@@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
                                        unsigned int offset)
 {
-       struct sctphdr *sh = sctp_hdr(skb);
+       struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
        const struct skb_checksum_ops ops = {
                .update  = sctp_csum_update,
                .combine = sctp_csum_combine,
index 328cb7cb7b0bb93f1eb3ec2708c43b29b56efb15..8de5ee258b93a50b2fdcde796bae3a5b53ce4d6a 100644 (file)
@@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
                hlist_add_head_rcu(&sk->sk_node, list);
 }
 
+static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
+{
+       sock_hold(sk);
+       hlist_add_tail_rcu(&sk->sk_node, list);
+}
+
 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
 {
        hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
index ee8d005f56fcddb0e3dc5d68b10a5dd1efdeb8f7..eb8f01c819e636aca55019cbe791b0e40b612baf 100644 (file)
@@ -56,7 +56,7 @@ static inline bool is_tcf_gact_goto_chain(const struct tc_action *a)
 
 static inline u32 tcf_gact_goto_chain_index(const struct tc_action *a)
 {
-       return a->goto_chain->index;
+       return READ_ONCE(a->tcfa_action) & TC_ACT_EXT_VAL_MASK;
 }
 
 #endif /* __NET_TC_GACT_H */
index 61cf7dbb678298559ccf9ea432adef04507bc597..d074b6d60f8af77a355b6755ef3d096237ff55bc 100644 (file)
@@ -36,7 +36,6 @@ struct xdp_umem {
        u32 headroom;
        u32 chunk_size_nohr;
        struct user_struct *user;
-       struct pid *pid;
        unsigned long address;
        refcount_t users;
        struct work_struct work;
index 3c38ac9a92a7c4b18cbb4ac49ac60bf887b03d20..929c8e537a14a517c0a3c7ca5b6b15353d622c30 100644 (file)
@@ -502,16 +502,6 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- *     Description
- *             Push an element *value* in *map*. *flags* is one of:
- *
- *             **BPF_EXIST**
- *             If the queue/stack is full, the oldest element is removed to
- *             make room for this.
- *     Return
- *             0 on success, or a negative error in case of failure.
- *
  * int bpf_probe_read(void *dst, u32 size, const void *src)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_addr** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_ops** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
@@ -2098,52 +2088,52 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded key press with *scancode*,
- *             *toggle* value in the given *protocol*. The scancode will be
- *             translated to a keycode using the rc keymap, and reported as
- *             an input key down event. After a period a key up event is
- *             generated. This period can be extended by calling either
- *             **bpf_rc_keydown**\ () again with the same values, or calling
- *             **bpf_rc_repeat**\ ().
+ *             report a successfully decoded repeat key message. This delays
+ *             the generation of a key up event for previously generated
+ *             key down event.
  *
- *             Some protocols include a toggle bit, in case the button was
- *             released and pressed again between consecutive scancodes.
+ *             Some IR protocols like NEC have a special IR message for
+ *             repeating last button, for when a button is held down.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
- *             The *protocol* is the decoded protocol number (see
- *             **enum rc_proto** for some predefined values).
- *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded repeat key message. This delays
- *             the generation of a key up event for previously generated
- *             key down event.
+ *             report a successfully decoded key press with *scancode*,
+ *             *toggle* value in the given *protocol*. The scancode will be
+ *             translated to a keycode using the rc keymap, and reported as
+ *             an input key down event. After a period a key up event is
+ *             generated. This period can be extended by calling either
+ *             **bpf_rc_keydown**\ () again with the same values, or calling
+ *             **bpf_rc_repeat**\ ().
  *
- *             Some IR protocols like NEC have a special IR message for
- *             repeating last button, for when a button is held down.
+ *             Some protocols include a toggle bit, in case the button was
+ *             released and pressed again between consecutive scancodes.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
+ *             The *protocol* is the decoded protocol number (see
+ *             **enum rc_proto** for some predefined values).
+ *
  *             This helper is only available is the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
  *     Description
  *             Return the cgroup v2 id of the socket associated with the *skb*.
  *             This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- *     Description
- *             Return id of cgroup v2 that is ancestor of cgroup associated
- *             with the *skb* at the *ancestor_level*.  The root cgroup is at
- *             *ancestor_level* zero and each step down the hierarchy
- *             increments the level. If *ancestor_level* == level of cgroup
- *             associated with *skb*, then return value will be same as that
- *             of **bpf_skb_cgroup_id**\ ().
- *
- *             The helper is useful to implement policies based on cgroups
- *             that are upper in hierarchy than immediate cgroup associated
- *             with *skb*.
- *
- *             The format of returned id and helper limitations are same as in
- *             **bpf_skb_cgroup_id**\ ().
- *     Return
- *             The id is returned or 0 in case the id could not be retrieved.
- *
  * u64 bpf_get_current_cgroup_id(void)
  *     Return
  *             A 64-bit integer containing the current cgroup id based
  *             on the cgroup within which the current task is running.
  *
- * voidget_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
  *     Description
  *             Get the pointer to the local storage area.
  *             The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ *     Description
+ *             Return id of cgroup v2 that is ancestor of cgroup associated
+ *             with the *skb* at the *ancestor_level*.  The root cgroup is at
+ *             *ancestor_level* zero and each step down the hierarchy
+ *             increments the level. If *ancestor_level* == level of cgroup
+ *             associated with *skb*, then return value will be same as that
+ *             of **bpf_skb_cgroup_id**\ ().
+ *
+ *             The helper is useful to implement policies based on cgroups
+ *             that are upper in hierarchy than immediate cgroup associated
+ *             with *skb*.
+ *
+ *             The format of returned id and helper limitations are same as in
+ *             **bpf_skb_cgroup_id**\ ().
+ *     Return
+ *             The id is returned or 0 in case the id could not be retrieved.
+ *
  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ *     Description
+ *             Push an element *value* in *map*. *flags* is one of:
+ *
+ *             **BPF_EXIST**
+ *                     If the queue/stack is full, the oldest element is
+ *                     removed to make room for this.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
  *     Return
  *             0
  *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Acquire a spinlock represented by the pointer *lock*, which is
+ *             stored as part of a value of a map. Taking the lock allows to
+ *             safely update the rest of the fields in that value. The
+ *             spinlock can (and must) later be released with a call to
+ *             **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ *             Spinlocks in BPF programs come with a number of restrictions
+ *             and constraints:
+ *
+ *             * **bpf_spin_lock** objects are only allowed inside maps of
+ *               types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ *               list could be extended in the future).
+ *             * BTF description of the map is mandatory.
+ *             * The BPF program can take ONE lock at a time, since taking two
+ *               or more could cause deadlocks.
+ *             * Only one **struct bpf_spin_lock** is allowed per map element.
+ *             * When the lock is taken, calls (either BPF to BPF or helpers)
+ *               are not allowed.
+ *             * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ *               allowed inside a spinlock-ed region.
+ *             * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ *               the lock, on all execution paths, before it returns.
+ *             * The BPF program can access **struct bpf_spin_lock** only via
+ *               the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ *               helpers. Loading or storing data into the **struct
+ *               bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ *             * To use the **bpf_spin_lock**\ () helper, the BTF description
+ *               of the map value must be a struct and have **struct
+ *               bpf_spin_lock** *anyname*\ **;** field at the top level.
+ *               Nested lock inside another struct is not allowed.
+ *             * The **struct bpf_spin_lock** *lock* field in a map value must
+ *               be aligned on a multiple of 4 bytes in that value.
+ *             * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ *               the **bpf_spin_lock** field to user space.
+ *             * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ *               a BPF program, do not update the **bpf_spin_lock** field.
+ *             * **bpf_spin_lock** cannot be on the stack or inside a
+ *               networking packet (it can only be inside of a map values).
+ *             * **bpf_spin_lock** is available to root only.
+ *             * Tracing programs and socket filter programs cannot use
+ *               **bpf_spin_lock**\ () due to insufficient preemption checks
+ *               (but this may change in the future).
+ *             * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ *     Return
+ *             0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Release the *lock* previously locked by a call to
+ *             **bpf_spin_lock**\ (\ *lock*\ ).
+ *     Return
+ *             0
+ *
  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_sock** pointer such
- *             that all the fields in bpf_sock can be accessed.
+ *             that all the fields in this **bpf_sock** can be accessed.
  *     Return
- *             A **struct bpf_sock** pointer on success, or NULL in
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_tcp_sock** pointer from a
  *             **struct bpf_sock** pointer.
- *
  *     Return
- *             A **struct bpf_tcp_sock** pointer on success, or NULL in
+ *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
- *     Description
- *             Sets ECN of IP header to ce (congestion encountered) if
- *             current value is ect (ECN capable). Works with IPv6 and IPv4.
- *     Return
- *             1 if set, 0 if not set.
+ *     Description
+ *             Set ECN (Explicit Congestion Notification) field of IP header
+ *             to **CE** (Congestion Encountered) if current value is **ECT**
+ *             (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ *             and IPv4.
+ *     Return
+ *             1 if the **CE** flag is set (either by the current helper call
+ *             or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ *     Description
+ *             Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ *             **bpf_sk_release**\ () is unnecessary and not allowed.
+ *     Return
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
+ *             case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -2465,7 +2530,8 @@ union bpf_attr {
        FN(spin_unlock),                \
        FN(sk_fullsock),                \
        FN(tcp_sock),                   \
-       FN(skb_ecn_set_ce),
+       FN(skb_ecn_set_ce),             \
+       FN(get_listener_sock),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
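
A minimal sketch of how a program might use the spin-lock helpers documented in this header hunk, assuming a modern libbpf BTF-defined map layout; the section name, map name and value struct are illustrative only, not part of this patch:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct val {
        struct bpf_spin_lock lock;      /* must be a top-level field of the map value */
        __u64 counter;
};

struct {
        __uint(type, BPF_MAP_TYPE_ARRAY);
        __uint(max_entries, 1);
        __type(key, __u32);
        __type(value, struct val);
} counters SEC(".maps");

SEC("tc")
int count_pkts(struct __sk_buff *skb)
{
        __u32 key = 0;
        struct val *v = bpf_map_lookup_elem(&counters, &key);

        if (!v)
                return 0;
        bpf_spin_lock(&v->lock);        /* only one lock may be held at a time */
        v->counter++;                   /* no helper or BPF-to-BPF calls while locked */
        bpf_spin_unlock(&v->lock);      /* must be released on every path before return */
        return 0;
}

char _license[] SEC("license") = "GPL";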
index 0e68024f36c712dcb295f6214914ea5dc3371a25..26f39816af14c149ab1d8be5842112f4bf36c18c 100644 (file)
@@ -102,6 +102,66 @@ enum vmmdev_request_type {
 #define VMMDEVREQ_HGCM_CALL VMMDEVREQ_HGCM_CALL32
 #endif
 
+/* vmmdev_request_header.requestor defines */
+
+/* Requestor user not given. */
+#define VMMDEV_REQUESTOR_USR_NOT_GIVEN                      0x00000000
+/* The kernel driver (vboxguest) is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV                            0x00000001
+/* Some other kernel driver is the requestor. */
+#define VMMDEV_REQUESTOR_USR_DRV_OTHER                      0x00000002
+/* The root or an admin user is the requestor. */
+#define VMMDEV_REQUESTOR_USR_ROOT                           0x00000003
+/* Regular joe user is making the request. */
+#define VMMDEV_REQUESTOR_USR_USER                           0x00000006
+/* User classification mask. */
+#define VMMDEV_REQUESTOR_USR_MASK                           0x00000007
+
+/* Kernel mode request. Note this is 0, check for !USERMODE instead. */
+#define VMMDEV_REQUESTOR_KERNEL                             0x00000000
+/* User mode request. */
+#define VMMDEV_REQUESTOR_USERMODE                           0x00000008
+/* User or kernel mode classification mask. */
+#define VMMDEV_REQUESTOR_MODE_MASK                          0x00000008
+
+/* Don't know the physical console association of the requestor. */
+#define VMMDEV_REQUESTOR_CON_DONT_KNOW                      0x00000000
+/*
+ * The request originates with a process that is NOT associated with the
+ * physical console.
+ */
+#define VMMDEV_REQUESTOR_CON_NO                             0x00000010
+/* Requestor process is associated with the physical console. */
+#define VMMDEV_REQUESTOR_CON_YES                            0x00000020
+/* Console classification mask. */
+#define VMMDEV_REQUESTOR_CON_MASK                           0x00000030
+
+/* Requestor is member of special VirtualBox user group. */
+#define VMMDEV_REQUESTOR_GRP_VBOX                           0x00000080
+
+/* Note: trust level is for Windows guests only; Linux always uses not-given */
+/* Requestor trust level: Unspecified */
+#define VMMDEV_REQUESTOR_TRUST_NOT_GIVEN                    0x00000000
+/* Requestor trust level: Untrusted (SID S-1-16-0) */
+#define VMMDEV_REQUESTOR_TRUST_UNTRUSTED                    0x00001000
+/* Requestor trust level: Low (SID S-1-16-4096) */
+#define VMMDEV_REQUESTOR_TRUST_LOW                          0x00002000
+/* Requestor trust level: Medium (SID S-1-16-8192) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM                       0x00003000
+/* Requestor trust level: Medium plus (SID S-1-16-8448) */
+#define VMMDEV_REQUESTOR_TRUST_MEDIUM_PLUS                  0x00004000
+/* Requestor trust level: High (SID S-1-16-12288) */
+#define VMMDEV_REQUESTOR_TRUST_HIGH                         0x00005000
+/* Requestor trust level: System (SID S-1-16-16384) */
+#define VMMDEV_REQUESTOR_TRUST_SYSTEM                       0x00006000
+/* Requestor trust level >= Protected (SID S-1-16-20480, S-1-16-28672) */
+#define VMMDEV_REQUESTOR_TRUST_PROTECTED                    0x00007000
+/* Requestor trust level mask */
+#define VMMDEV_REQUESTOR_TRUST_MASK                         0x00007000
+
+/* Requestor is using the less trusted user device node (/dev/vboxuser) */
+#define VMMDEV_REQUESTOR_USER_DEVICE                        0x00008000
+
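A hedged sketch of how a consumer might combine these requestor bits; the policy and function name are illustrative only, while the 'requestor' field name follows the comment at the top of this block:

static bool vmmdev_req_from_trusted_user(u32 requestor)
{
        /* Kernel-mode requests carry no user classification. */
        if ((requestor & VMMDEV_REQUESTOR_MODE_MASK) != VMMDEV_REQUESTOR_USERMODE)
                return true;

        /* root/admin is always accepted. */
        if ((requestor & VMMDEV_REQUESTOR_USR_MASK) == VMMDEV_REQUESTOR_USR_ROOT)
                return true;

        /* Otherwise require vbox group membership and the full device node. */
        return (requestor & VMMDEV_REQUESTOR_GRP_VBOX) &&
               !(requestor & VMMDEV_REQUESTOR_USER_DEVICE);
}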
 /** HGCM service location types. */
 enum vmmdev_hgcm_service_location_type {
        VMMDEV_HGCM_LOC_INVALID    = 0,
index 62f6bced3a3c486732dd871693d5d44cf19ab8c2..afca36f53c492718820ecacdb588af585dbb50e4 100644 (file)
@@ -136,21 +136,29 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 
 void *bpf_map_area_alloc(size_t size, int numa_node)
 {
-       /* We definitely need __GFP_NORETRY, so OOM killer doesn't
-        * trigger under memory pressure as we really just want to
-        * fail instead.
+       /* We really just want to fail instead of triggering OOM killer
+        * under memory pressure, therefore we pass __GFP_NORETRY to kmalloc,
+        * which is used for lower order allocation requests.
+        *
+        * It has been observed that higher order allocation requests done by
+        * vmalloc with __GFP_NORETRY being set might fail due to not trying
+        * to reclaim memory from the page cache, thus we set
+        * __GFP_RETRY_MAYFAIL to avoid such situations.
         */
-       const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+
+       const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
        void *area;
 
        if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-               area = kmalloc_node(size, GFP_USER | flags, numa_node);
+               area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
+                                   numa_node);
                if (area != NULL)
                        return area;
        }
 
-       return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
-                                          __builtin_return_address(0));
+       return __vmalloc_node_flags_caller(size, numa_node,
+                                          GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+                                          flags, __builtin_return_address(0));
 }
 
 void bpf_map_area_free(void *area)
index ce166a002d161a08eff6bdbb77158886dbba8012..fd502c1f71eb003e5975ec58e33ccc8f8e1c0586 100644 (file)
@@ -212,7 +212,7 @@ struct bpf_call_arg_meta {
        int access_size;
        s64 msize_smax_value;
        u64 msize_umax_value;
-       int ptr_id;
+       int ref_obj_id;
        int func_id;
 };
 
@@ -346,35 +346,23 @@ static bool reg_type_may_be_null(enum bpf_reg_type type)
               type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool type_is_refcounted(enum bpf_reg_type type)
-{
-       return type == PTR_TO_SOCKET;
-}
-
-static bool type_is_refcounted_or_null(enum bpf_reg_type type)
-{
-       return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
-}
-
-static bool reg_is_refcounted(const struct bpf_reg_state *reg)
-{
-       return type_is_refcounted(reg->type);
-}
-
 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 {
        return reg->type == PTR_TO_MAP_VALUE &&
                map_value_has_spin_lock(reg->map_ptr);
 }
 
-static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
+static bool reg_type_may_be_refcounted_or_null(enum bpf_reg_type type)
 {
-       return type_is_refcounted_or_null(reg->type);
+       return type == PTR_TO_SOCKET ||
+               type == PTR_TO_SOCKET_OR_NULL ||
+               type == PTR_TO_TCP_SOCK ||
+               type == PTR_TO_TCP_SOCK_OR_NULL;
 }
 
-static bool arg_type_is_refcounted(enum bpf_arg_type type)
+static bool arg_type_may_be_refcounted(enum bpf_arg_type type)
 {
-       return type == ARG_PTR_TO_SOCKET;
+       return type == ARG_PTR_TO_SOCK_COMMON;
 }
 
 /* Determine whether the function releases some resources allocated by another
@@ -392,6 +380,12 @@ static bool is_acquire_function(enum bpf_func_id func_id)
                func_id == BPF_FUNC_sk_lookup_udp;
 }
 
+static bool is_ptr_cast_function(enum bpf_func_id func_id)
+{
+       return func_id == BPF_FUNC_tcp_sock ||
+               func_id == BPF_FUNC_sk_fullsock;
+}
+
 /* string representation of 'enum bpf_reg_type' */
 static const char * const reg_type_str[] = {
        [NOT_INIT]              = "?",
@@ -466,6 +460,8 @@ static void print_verifier_state(struct bpf_verifier_env *env,
                                verbose(env, ",call_%d", func(env, reg)->callsite);
                } else {
                        verbose(env, "(id=%d", reg->id);
+                       if (reg_type_may_be_refcounted_or_null(t))
+                               verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
                        if (t != SCALAR_VALUE)
                                verbose(env, ",off=%d", reg->off);
                        if (type_is_pkt_pointer(t))
@@ -2414,16 +2410,15 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
                /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */
                if (!type_is_sk_pointer(type))
                        goto err_type;
-       } else if (arg_type == ARG_PTR_TO_SOCKET) {
-               expected_type = PTR_TO_SOCKET;
-               if (type != expected_type)
-                       goto err_type;
-               if (meta->ptr_id || !reg->id) {
-                       verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
-                               meta->ptr_id, reg->id);
-                       return -EFAULT;
+               if (reg->ref_obj_id) {
+                       if (meta->ref_obj_id) {
+                               verbose(env, "verifier internal error: more than one arg with ref_obj_id R%d %u %u\n",
+                                       regno, reg->ref_obj_id,
+                                       meta->ref_obj_id);
+                               return -EFAULT;
+                       }
+                       meta->ref_obj_id = reg->ref_obj_id;
                }
-               meta->ptr_id = reg->id;
        } else if (arg_type == ARG_PTR_TO_SPIN_LOCK) {
                if (meta->func_id == BPF_FUNC_spin_lock) {
                        if (process_spin_lock(env, regno, true))
@@ -2740,32 +2735,38 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
        return true;
 }
 
-static bool check_refcount_ok(const struct bpf_func_proto *fn)
+static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
 {
        int count = 0;
 
-       if (arg_type_is_refcounted(fn->arg1_type))
+       if (arg_type_may_be_refcounted(fn->arg1_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg2_type))
+       if (arg_type_may_be_refcounted(fn->arg2_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg3_type))
+       if (arg_type_may_be_refcounted(fn->arg3_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg4_type))
+       if (arg_type_may_be_refcounted(fn->arg4_type))
                count++;
-       if (arg_type_is_refcounted(fn->arg5_type))
+       if (arg_type_may_be_refcounted(fn->arg5_type))
                count++;
 
+       /* A reference acquiring function cannot acquire
+        * another refcounted ptr.
+        */
+       if (is_acquire_function(func_id) && count)
+               return false;
+
        /* We only support one arg being unreferenced at the moment,
         * which is sufficient for the helper functions we have right now.
         */
        return count <= 1;
 }
 
-static int check_func_proto(const struct bpf_func_proto *fn)
+static int check_func_proto(const struct bpf_func_proto *fn, int func_id)
 {
        return check_raw_mode_ok(fn) &&
               check_arg_pair_ok(fn) &&
-              check_refcount_ok(fn) ? 0 : -EINVAL;
+              check_refcount_ok(fn, func_id) ? 0 : -EINVAL;
 }
 
 /* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2799,19 +2800,20 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
 }
 
 static void release_reg_references(struct bpf_verifier_env *env,
-                                  struct bpf_func_state *state, int id)
+                                  struct bpf_func_state *state,
+                                  int ref_obj_id)
 {
        struct bpf_reg_state *regs = state->regs, *reg;
        int i;
 
        for (i = 0; i < MAX_BPF_REG; i++)
-               if (regs[i].id == id)
+               if (regs[i].ref_obj_id == ref_obj_id)
                        mark_reg_unknown(env, regs, i);
 
        bpf_for_each_spilled_reg(i, state, reg) {
                if (!reg)
                        continue;
-               if (reg_is_refcounted(reg) && reg->id == id)
+               if (reg->ref_obj_id == ref_obj_id)
                        __mark_reg_unknown(reg);
        }
 }
@@ -2820,15 +2822,20 @@ static void release_reg_references(struct bpf_verifier_env *env,
  * resources. Identify all copies of the same pointer and clear the reference.
  */
 static int release_reference(struct bpf_verifier_env *env,
-                            struct bpf_call_arg_meta *meta)
+                            int ref_obj_id)
 {
        struct bpf_verifier_state *vstate = env->cur_state;
+       int err;
        int i;
 
+       err = release_reference_state(cur_func(env), ref_obj_id);
+       if (err)
+               return err;
+
        for (i = 0; i <= vstate->curframe; i++)
-               release_reg_references(env, vstate->frame[i], meta->ptr_id);
+               release_reg_references(env, vstate->frame[i], ref_obj_id);
 
-       return release_reference_state(cur_func(env), meta->ptr_id);
+       return 0;
 }
 
 static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
@@ -3047,7 +3054,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
        memset(&meta, 0, sizeof(meta));
        meta.pkt_access = fn->pkt_access;
 
-       err = check_func_proto(fn);
+       err = check_func_proto(fn, func_id);
        if (err) {
                verbose(env, "kernel subsystem misconfigured func %s#%d\n",
                        func_id_name(func_id), func_id);
@@ -3093,7 +3100,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                        return err;
                }
        } else if (is_release_function(func_id)) {
-               err = release_reference(env, &meta);
+               err = release_reference(env, meta.ref_obj_id);
                if (err) {
                        verbose(env, "func %s#%d reference has not been acquired before\n",
                                func_id_name(func_id), func_id);
@@ -3154,8 +3161,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 
                        if (id < 0)
                                return id;
-                       /* For release_reference() */
+                       /* For mark_ptr_or_null_reg() */
                        regs[BPF_REG_0].id = id;
+                       /* For release_reference() */
+                       regs[BPF_REG_0].ref_obj_id = id;
                } else {
                        /* For mark_ptr_or_null_reg() */
                        regs[BPF_REG_0].id = ++env->id_gen;
@@ -3170,6 +3179,10 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
                return -EINVAL;
        }
 
+       if (is_ptr_cast_function(func_id))
+               /* For release_reference() */
+               regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
+
        do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
 
        err = check_map_func_compatibility(env, meta.map_ptr, func_id);
@@ -3368,7 +3381,7 @@ do_sim:
                *dst_reg = *ptr_reg;
        }
        ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
-       if (!ptr_is_dst_reg)
+       if (!ptr_is_dst_reg && ret)
                *dst_reg = tmp;
        return !ret ? -EFAULT : 0;
 }
@@ -4665,11 +4678,19 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
                } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) {
                        reg->type = PTR_TO_TCP_SOCK;
                }
-               if (is_null || !(reg_is_refcounted(reg) ||
-                                reg_may_point_to_spin_lock(reg))) {
-                       /* We don't need id from this point onwards anymore,
-                        * thus we should better reset it, so that state
-                        * pruning has chances to take effect.
+               if (is_null) {
+                       /* We don't need id and ref_obj_id from this point
+                        * onwards anymore, thus we should better reset it,
+                        * so that state pruning has chances to take effect.
+                        */
+                       reg->id = 0;
+                       reg->ref_obj_id = 0;
+               } else if (!reg_may_point_to_spin_lock(reg)) {
+                       /* For not-NULL ptr, reg->ref_obj_id will be reset
+                        * in release_reg_references().
+                        *
+                        * reg->id is still used by spin_lock ptr. Other
+                        * than spin_lock ptr type, reg->id can be reset.
                         */
                        reg->id = 0;
                }
@@ -4684,11 +4705,16 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
 {
        struct bpf_func_state *state = vstate->frame[vstate->curframe];
        struct bpf_reg_state *reg, *regs = state->regs;
+       u32 ref_obj_id = regs[regno].ref_obj_id;
        u32 id = regs[regno].id;
        int i, j;
 
-       if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
-               release_reference_state(state, id);
+       if (ref_obj_id && ref_obj_id == id && is_null)
+               /* regs[regno] is in the " == NULL" branch.
+                * No one could have freed the reference state before
+                * doing the NULL check.
+                */
+               WARN_ON_ONCE(release_reference_state(state, id));
 
        for (i = 0; i < MAX_BPF_REG; i++)
                mark_ptr_or_null_reg(state, &regs[i], id, is_null);
@@ -6052,15 +6078,17 @@ static int propagate_liveness(struct bpf_verifier_env *env,
        }
        /* Propagate read liveness of registers... */
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
-       /* We don't need to worry about FP liveness because it's read-only */
-       for (i = 0; i < BPF_REG_FP; i++) {
-               if (vparent->frame[vparent->curframe]->regs[i].live & REG_LIVE_READ)
-                       continue;
-               if (vstate->frame[vstate->curframe]->regs[i].live & REG_LIVE_READ) {
-                       err = mark_reg_read(env, &vstate->frame[vstate->curframe]->regs[i],
-                                           &vparent->frame[vstate->curframe]->regs[i]);
-                       if (err)
-                               return err;
+       for (frame = 0; frame <= vstate->curframe; frame++) {
+               /* We don't need to worry about FP liveness, it's read-only */
+               for (i = frame < vstate->curframe ? BPF_REG_6 : 0; i < BPF_REG_FP; i++) {
+                       if (vparent->frame[frame]->regs[i].live & REG_LIVE_READ)
+                               continue;
+                       if (vstate->frame[frame]->regs[i].live & REG_LIVE_READ) {
+                               err = mark_reg_read(env, &vstate->frame[frame]->regs[i],
+                                                   &vparent->frame[frame]->regs[i]);
+                               if (err)
+                                       return err;
+                       }
                }
        }
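A hedged sketch of the program-side pattern this ref_obj_id tracking is meant to support: a socket reference acquired by bpf_sk_lookup_tcp() can now be released through a pointer returned by a cast helper such as bpf_tcp_sock(). The section name, includes and the empty tuple are illustrative only; the C-level cast is cosmetic, the verifier tracks the real register type:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int release_via_cast(struct __sk_buff *skb)
{
        struct bpf_sock_tuple tuple = {};
        struct bpf_tcp_sock *tp;
        struct bpf_sock *sk;

        sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
                               BPF_F_CURRENT_NETNS, 0);
        if (!sk)
                return 0;

        tp = bpf_tcp_sock(sk);
        if (!tp) {
                bpf_sk_release(sk);
                return 0;
        }

        /* sk and tp now share one ref_obj_id; releasing either pointer
         * satisfies the verifier's reference tracking. */
        bpf_sk_release((struct bpf_sock *)tp);
        return 0;
}

char _license[] SEC("license") = "GPL";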
 
index 771e93f9c43f826270c1927665fa5d6aaa8654e7..6f357f4fc85900db94f5a9dc45b098849e0e158d 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/cn_proc.h>
 #include <linux/compat.h>
+#include <linux/sched/signal.h>
 
 /*
  * Access another process' address space via ptrace.
@@ -924,18 +925,26 @@ int ptrace_request(struct task_struct *child, long request,
                        ret = ptrace_setsiginfo(child, &siginfo);
                break;
 
-       case PTRACE_GETSIGMASK:
+       case PTRACE_GETSIGMASK: {
+               sigset_t *mask;
+
                if (addr != sizeof(sigset_t)) {
                        ret = -EINVAL;
                        break;
                }
 
-               if (copy_to_user(datavp, &child->blocked, sizeof(sigset_t)))
+               if (test_tsk_restore_sigmask(child))
+                       mask = &child->saved_sigmask;
+               else
+                       mask = &child->blocked;
+
+               if (copy_to_user(datavp, mask, sizeof(sigset_t)))
                        ret = -EFAULT;
                else
                        ret = 0;
 
                break;
+       }
 
        case PTRACE_SETSIGMASK: {
                sigset_t new_set;
@@ -961,6 +970,8 @@ int ptrace_request(struct task_struct *child, long request,
                child->blocked = new_set;
                spin_unlock_irq(&child->sighand->siglock);
 
+               clear_tsk_restore_sigmask(child);
+
                ret = 0;
                break;
        }
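A hedged userspace sketch of the interface touched above: with this change, a tracee stopped inside sigsuspend() reports its saved_sigmask rather than the temporary mask. The helper is illustrative and assumes the target is already ptrace-attached and stopped; note the kernel expects 'addr' to be the size of its own sigset_t (8 bytes on x86-64), not glibc's larger sigset_t:

#include <stdio.h>
#include <signal.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void dump_blocked(pid_t pid)
{
        sigset_t set;

        sigemptyset(&set);      /* the kernel only fills the first bytes of the glibc set */
        if (ptrace(PTRACE_GETSIGMASK, pid, (void *)8UL, &set) == -1) {
                perror("PTRACE_GETSIGMASK");
                return;
        }
        printf("SIGUSR1 blocked: %d\n", sigismember(&set, SIGUSR1));
}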
index fa79323331b22f327872ae41e185a17562f6d78a..26c8ca9bd06b6725b84f42d6b635c0d256118ebb 100644 (file)
@@ -1992,7 +1992,7 @@ static void print_bug_type(void)
  * modifying the code. @failed should be one of either:
  * EFAULT - if the problem happens on reading the @ip address
  * EINVAL - if what is read at @ip is not what was expected
- * EPERM - if the problem happens on writting to the @ip address
+ * EPERM - if the problem happens on writing to the @ip address
  */
 void ftrace_bug(int failed, struct dyn_ftrace *rec)
 {
@@ -2391,7 +2391,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
                return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
        }
 
-       return -1; /* unknow ftrace bug */
+       return -1; /* unknown ftrace bug */
 }
 
 void __weak ftrace_replace_code(int mod_flags)
@@ -3004,7 +3004,7 @@ ftrace_allocate_pages(unsigned long num_to_init)
        int cnt;
 
        if (!num_to_init)
-               return 0;
+               return NULL;
 
        start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
        if (!pg)
@@ -4755,7 +4755,7 @@ static int
 ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
                int reset, int enable)
 {
-       return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
+       return ftrace_set_hash(ops, NULL, 0, ip, remove, reset, enable);
 }
 
 /**
@@ -5463,7 +5463,7 @@ void ftrace_create_filter_files(struct ftrace_ops *ops,
 
 /*
  * The name "destroy_filter_files" is really a misnomer. Although
- * in the future, it may actualy delete the files, but this is
+ * in the future, it may actually delete the files, but this is
  * really intended to make sure the ops passed in are disabled
  * and that when this function returns, the caller is free to
  * free the ops.
@@ -5786,7 +5786,7 @@ void ftrace_module_enable(struct module *mod)
        /*
         * If the tracing is enabled, go ahead and enable the record.
         *
-        * The reason not to enable the record immediatelly is the
+        * The reason not to enable the record immediately is the
         * inherent check of ftrace_make_nop/ftrace_make_call for
         * correct previous instructions.  Making first the NOP
         * conversion puts the module to the correct state, thus
index dd1f43588d7097a62a84966d8a90b425fd0f2f64..fa100ed3b4de9d128cd05215c972828ee13fbb26 100644 (file)
@@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
 static int create_dyn_event(int argc, char **argv)
 {
        struct dyn_event_operations *ops;
-       int ret;
+       int ret = -ENODEV;
 
        if (argv[0][0] == '-' || argv[0][0] == '!')
                return dyn_event_release(argc, argv, NULL);
index ca46339f30090d84d72667b4207a8272d7fa1e9f..795aa203837733f6968f26ae1f8f4ca7b399f695 100644 (file)
@@ -3713,7 +3713,6 @@ static void track_data_destroy(struct hist_trigger_data *hist_data,
        struct trace_event_file *file = hist_data->event_file;
 
        destroy_hist_field(data->track_data.track_var, 0);
-       destroy_hist_field(data->track_data.var_ref, 0);
 
        if (data->action == ACTION_SNAPSHOT) {
                struct track_data *track_data;
index 403c9bd9041395a1d7919977acc7ed68d8c746ca..6a578723311328a6394941aeb9ff4eef6130b4ac 100644 (file)
@@ -554,13 +554,15 @@ static void softlockup_start_all(void)
 
 int lockup_detector_online_cpu(unsigned int cpu)
 {
-       watchdog_enable(cpu);
+       if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+               watchdog_enable(cpu);
        return 0;
 }
 
 int lockup_detector_offline_cpu(unsigned int cpu)
 {
-       watchdog_disable(cpu);
+       if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
+               watchdog_disable(cpu);
        return 0;
 }
 
index 0a105d4af16644bcbdf8a4f62f295d3ac534e31d..97f59abc3e92583917769f232bdd9f7d5e67520f 100644 (file)
@@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
        else if (tbl->nest)
                err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
 
-       if (!err)
-               err = rhashtable_rehash_table(ht);
+       if (!err || err == -EEXIST) {
+               int nerr;
+
+               nerr = rhashtable_rehash_table(ht);
+               err = err ?: nerr;
+       }
 
        mutex_unlock(&ht->mutex);
 
index 5b382c1244ede33c14016142ac2d7fec4d0608da..155fe38756ecfda251f26fa8616a325dddd8d455 100644 (file)
@@ -591,6 +591,17 @@ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
 void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
                         unsigned int cpu)
 {
+       /*
+        * Once the clear bit is set, the bit may be allocated out.
+        *
+        * Orders READ/WRITE on the associated instance (such as a request
+        * in blk_mq) against this bit to avoid racing with re-allocation,
+        * and its pair is the memory barrier implied in __sbitmap_get_word.
+        *
+        * One invariant is that the clear bit has to be zero when the bit
+        * is in use.
+        */
+       smp_mb__before_atomic();
        sbitmap_deferred_clear_bit(&sbq->sb, nr);
 
        /*
index c0b31b6c38773f37177d1a90a9e5ddd96653449c..eee9c221280c07c22eec9c33845ec2edf003faf1 100644 (file)
@@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
                pr_warn("ksm ");
        else if (mapping) {
                pr_warn("%ps ", mapping->a_ops);
-               if (mapping->host->i_dentry.first) {
+               if (mapping->host && mapping->host->i_dentry.first) {
                        struct dentry *dentry;
                        dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
                        pr_warn("name:\"%pd\" ", dentry);
@@ -168,7 +168,7 @@ void dump_mm(const struct mm_struct *mm)
                mm_pgtables_bytes(mm),
                mm->map_count,
                mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
-               atomic64_read(&mm->pinned_vm),
+               (u64)atomic64_read(&mm->pinned_vm),
                mm->data_vm, mm->exec_vm, mm->stack_vm,
                mm->start_code, mm->end_code, mm->start_data, mm->end_data,
                mm->start_brk, mm->brk, mm->start_stack,
index 3e0c11f7d7a1ef4125d355be6096d357efb8d9d9..3ce956efa0cb804cfd964bbc725857b7aed5d7f1 100644 (file)
@@ -163,7 +163,10 @@ static inline u8 random_tag(void)
 #endif
 
 #ifndef arch_kasan_set_tag
-#define arch_kasan_set_tag(addr, tag)  ((void *)(addr))
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+       return addr;
+}
 #endif
 #ifndef arch_kasan_reset_tag
 #define arch_kasan_reset_tag(addr)     ((void *)(addr))
index 47fe250307c7aa0f553454af6f459343819f8770..ab650c21bccd5450673470f845675096b09010d9 100644 (file)
@@ -1549,10 +1549,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                                WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
                                goto out_unlock;
                        }
-                       entry = *pte;
-                       goto out_mkwrite;
-               } else
-                       goto out_unlock;
+                       entry = pte_mkyoung(*pte);
+                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+                       if (ptep_set_access_flags(vma, addr, pte, entry, 1))
+                               update_mmu_cache(vma, addr, pte);
+               }
+               goto out_unlock;
        }
 
        /* Ok, finally just insert the thing.. */
@@ -1561,7 +1563,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
        else
                entry = pte_mkspecial(pfn_t_pte(pfn, prot));
 
-out_mkwrite:
        if (mkwrite) {
                entry = pte_mkyoung(entry);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
index f767582af4f8c0f28102f2d77d8dc6a667ec3df5..0082d699be94b4c28e1820351916568e68a684bb 100644 (file)
@@ -1576,7 +1576,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
 {
        unsigned long pfn, nr_pages;
        long offlined_pages;
-       int ret, node;
+       int ret, node, nr_isolate_pageblock;
        unsigned long flags;
        unsigned long valid_start, valid_end;
        struct zone *zone;
@@ -1602,10 +1602,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
        ret = start_isolate_page_range(start_pfn, end_pfn,
                                       MIGRATE_MOVABLE,
                                       SKIP_HWPOISON | REPORT_FAILURE);
-       if (ret) {
+       if (ret < 0) {
                reason = "failure to isolate range";
                goto failed_removal;
        }
+       nr_isolate_pageblock = ret;
 
        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
@@ -1657,8 +1658,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
        /* Ok, all of our target is isolated.
           We cannot do rollback at this point. */
        offline_isolated_pages(start_pfn, end_pfn);
-       /* reset pagetype flags and makes migrate type to be MOVABLE */
-       undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+
+       /*
+        * Onlining will reset pagetype flags and make the migrate type
+        * MOVABLE, so we just need to decrease the zone's isolated
+        * pageblock counter here.
+        */
+       spin_lock_irqsave(&zone->lock, flags);
+       zone->nr_isolate_pageblock -= nr_isolate_pageblock;
+       spin_unlock_irqrestore(&zone->lock, flags);
+
        /* removal success */
        adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
        zone->present_pages -= offlined_pages;
@@ -1690,12 +1699,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
 failed_removal_isolated:
        undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+       memory_notify(MEM_CANCEL_OFFLINE, &arg);
 failed_removal:
        pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
                 (unsigned long long) start_pfn << PAGE_SHIFT,
                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
                 reason);
-       memory_notify(MEM_CANCEL_OFFLINE, &arg);
        /* pushback to free area */
        mem_hotplug_done();
        return ret;
index af171ccb56a29713a326b1018e38215700ffcfe5..2219e747df494e5799d5e1af97d6b456907bfa43 100644 (file)
@@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
        return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
 }
 
+/*
+ * queue_pages_pmd() has three possible return values:
+ * 1 - pages are placed on the right node or queued successfully.
+ * 0 - THP was split.
+ * -EIO - the pmd is a migration entry, or MPOL_MF_STRICT was specified and
+ *        an existing page was already on a node that does not follow the
+ *        policy.
+ */
 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
                                unsigned long end, struct mm_walk *walk)
 {
@@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        unsigned long flags;
 
        if (unlikely(is_pmd_migration_entry(*pmd))) {
-               ret = 1;
+               ret = -EIO;
                goto unlock;
        }
        page = pmd_page(*pmd);
@@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
        ret = 1;
        flags = qp->flags;
        /* go to thp migration */
-       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+               if (!vma_migratable(walk->vma)) {
+                       ret = -EIO;
+                       goto unlock;
+               }
+
                migrate_page_add(page, qp->pagelist, flags);
+       } else
+               ret = -EIO;
 unlock:
        spin_unlock(ptl);
 out:
@@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        ptl = pmd_trans_huge_lock(pmd, vma);
        if (ptl) {
                ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
-               if (ret)
+               if (ret > 0)
                        return 0;
+               else if (ret < 0)
+                       return ret;
        }
 
        if (pmd_trans_unstable(pmd))
@@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                        continue;
                if (!queue_pages_required(page, qp))
                        continue;
-               migrate_page_add(page, qp->pagelist, flags);
+               if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
+                       if (!vma_migratable(vma))
+                               break;
+                       migrate_page_add(page, qp->pagelist, flags);
+               } else
+                       break;
        }
        pte_unmap_unlock(pte - 1, ptl);
        cond_resched();
-       return 0;
+       return addr != end ? -EIO : 0;
 }
 
 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
@@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        unsigned long endvma = vma->vm_end;
        unsigned long flags = qp->flags;
 
-       if (!vma_migratable(vma))
+       /*
+        * Need to check MPOL_MF_STRICT so that -EIO can be returned
+        * regardless of vma_migratable
+        */
+       if (!vma_migratable(vma) &&
+           !(flags & MPOL_MF_STRICT))
                return 1;
 
        if (endvma > end)
@@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
        }
 
        /* queue pages from current vma */
-       if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+       if (flags & MPOL_MF_VALID)
                return 0;
        return 1;
 }
index ac6f4939bb5975a2cab6e78419562c1529e12219..663a5449367a4204e937491d2d9032b0a3768bdf 100644 (file)
@@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
                                pte = swp_entry_to_pte(entry);
                        } else if (is_device_public_page(new)) {
                                pte = pte_mkdevmap(pte);
-                               flush_dcache_page(new);
                        }
-               } else
-                       flush_dcache_page(new);
+               }
 
 #ifdef CONFIG_HUGETLB_PAGE
                if (PageHuge(new)) {
@@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
                 */
                if (!PageMappingFlags(page))
                        page->mapping = NULL;
+
+               if (unlikely(is_zone_device_page(newpage))) {
+                       if (is_device_public_page(newpage))
+                               flush_dcache_page(newpage);
+               } else
+                       flush_dcache_page(newpage);
+
        }
 out:
        return rc;
index 03fcf73d47dabde0987f3542c3c87fca33bf5a5d..d96ca5bc555bbc432e135c876151e0699ee88162 100644 (file)
@@ -8233,7 +8233,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        ret = start_isolate_page_range(pfn_max_align_down(start),
                                       pfn_max_align_up(end), migratetype, 0);
-       if (ret)
+       if (ret < 0)
                return ret;
 
        /*
index ce323e56b34d6bc43a9e16cc9054f35290bb0b63..019280712e1b8b7e075b51573b5c56d07aef3922 100644 (file)
@@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
         * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
         * We just check MOVABLE pages.
         */
-       if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
+       if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
+                                isol_flags))
                ret = 0;
 
        /*
@@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
        return NULL;
 }
 
-/*
- * start_isolate_page_range() -- make page-allocation-type of range of pages
- * to be MIGRATE_ISOLATE.
- * @start_pfn: The lower PFN of the range to be isolated.
- * @end_pfn: The upper PFN of the range to be isolated.
- * @migratetype: migrate type to set in error recovery.
+/**
+ * start_isolate_page_range() - make page-allocation-type of range of pages to
+ * be MIGRATE_ISOLATE.
+ * @start_pfn:         The lower PFN of the range to be isolated.
+ * @end_pfn:           The upper PFN of the range to be isolated.
+ *                     start_pfn/end_pfn must be aligned to pageblock_order.
+ * @migratetype:       Migrate type to set in error recovery.
+ * @flags:             The following flags are allowed (they can be combined in
+ *                     a bit mask)
+ *                     SKIP_HWPOISON - ignore hwpoison pages
+ *                     REPORT_FAILURE - report details about the failure to
+ *                     isolate the range
  *
  * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
  * the range will never be allocated. Any free pages and pages freed in the
- * future will not be allocated again.
- *
- * start_pfn/end_pfn must be aligned to pageblock_order.
- * Return 0 on success and -EBUSY if any part of range cannot be isolated.
+ * future will not be allocated again. If the specified range includes migrate
+ * types other than MOVABLE or CMA, this will fail with -EBUSY. To finally
+ * isolate all pages in the range, the caller has to free all pages in the
+ * range; test_page_isolated() can be used to test for that.
  *
  * There is no high level synchronization mechanism that prevents two threads
- * from trying to isolate overlapping ranges.  If this happens, one thread
+ * from trying to isolate overlapping ranges. If this happens, one thread
  * will notice pageblocks in the overlapping range already set to isolate.
  * This happens in set_migratetype_isolate, and set_migratetype_isolate
- * returns an error.  We then clean up by restoring the migration type on
- * pageblocks we may have modified and return -EBUSY to caller.  This
+ * returns an error. We then clean up by restoring the migration type on
+ * pageblocks we may have modified and return -EBUSY to caller. This
  * prevents two threads from simultaneously working on overlapping ranges.
+ *
+ * Return: the number of isolated pageblocks on success and -EBUSY if any part
+ * of range cannot be isolated.
  */
 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
                             unsigned migratetype, int flags)
@@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
        unsigned long pfn;
        unsigned long undo_pfn;
        struct page *page;
+       int nr_isolate_pageblock = 0;
 
        BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
        BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
@@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
             pfn < end_pfn;
             pfn += pageblock_nr_pages) {
                page = __first_valid_page(pfn, pageblock_nr_pages);
-               if (page &&
-                   set_migratetype_isolate(page, migratetype, flags)) {
-                       undo_pfn = pfn;
-                       goto undo;
+               if (page) {
+                       if (set_migratetype_isolate(page, migratetype, flags)) {
+                               undo_pfn = pfn;
+                               goto undo;
+                       }
+                       nr_isolate_pageblock++;
                }
        }
-       return 0;
+       return nr_isolate_pageblock;
 undo:
        for (pfn = start_pfn;
             pfn < undo_pfn;
index 28652e4218e0c1e5da82e2094d5ec43046e9b472..329bfe67f2cae966f07d930be7e10e2041505acb 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2115,6 +2115,8 @@ done:
        cachep->allocflags = __GFP_COMP;
        if (flags & SLAB_CACHE_DMA)
                cachep->allocflags |= GFP_DMA;
+       if (flags & SLAB_CACHE_DMA32)
+               cachep->allocflags |= GFP_DMA32;
        if (flags & SLAB_RECLAIM_ACCOUNT)
                cachep->allocflags |= __GFP_RECLAIMABLE;
        cachep->size = size;
index e5e6658eeacca81c694ccef400d19bbcd138d6ab..43ac818b8592bc472b4b67e19831b404cc798aca 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
 
 
 /* Legal flag mask for kmem_cache_create(), for various configurations */
-#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
+#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
+                        SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
 
 #if defined(CONFIG_DEBUG_SLAB)
index 03eeb8b7b4b1d5d9fc0a395459478c79ad8a2656..58251ba63e4a19fb9262c6adb59831075a858dd5 100644 (file)
@@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
                SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-                        SLAB_ACCOUNT)
+                        SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
index 1b08fbcb7e61fbcc5fa84738dc09e88050d2bd2b..d30ede89f4a6499a07e69baf981b755d0a1b4400 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3589,6 +3589,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
        if (s->flags & SLAB_CACHE_DMA)
                s->allocflags |= GFP_DMA;
 
+       if (s->flags & SLAB_CACHE_DMA32)
+               s->allocflags |= GFP_DMA32;
+
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                s->allocflags |= __GFP_RECLAIMABLE;
 
@@ -5679,6 +5682,8 @@ static char *create_unique_id(struct kmem_cache *s)
         */
        if (s->flags & SLAB_CACHE_DMA)
                *p++ = 'd';
+       if (s->flags & SLAB_CACHE_DMA32)
+               *p++ = 'D';
        if (s->flags & SLAB_RECLAIM_ACCOUNT)
                *p++ = 'a';
        if (s->flags & SLAB_CONSISTENCY_CHECKS)
index 69904aa6165bf13b89a44d6abf84609fea2076ba..56e057c432f9663439c4cfd38f8cad8f2e6e2c3d 100644 (file)
@@ -567,7 +567,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-/* Mark all memory sections within the pfn range as online */
+/* Mark all memory sections within the pfn range as offline */
 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 {
        unsigned long pfn;
index 49a16cee2aae97f013ef79401feb0b4de3c4a44e..420a98bf79b536d11f862026ff71747a67301cf4 100644 (file)
@@ -879,15 +879,24 @@ static struct notifier_block aarp_notifier = {
 
 static unsigned char aarp_snap_id[] = { 0x00, 0x00, 0x00, 0x80, 0xF3 };
 
-void __init aarp_proto_init(void)
+int __init aarp_proto_init(void)
 {
+       int rc;
+
        aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
-       if (!aarp_dl)
+       if (!aarp_dl) {
                printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
+               return -ENOMEM;
+       }
        timer_setup(&aarp_timer, aarp_expire_timeout, 0);
        aarp_timer.expires  = jiffies + sysctl_aarp_expiry_time;
        add_timer(&aarp_timer);
-       register_netdevice_notifier(&aarp_notifier);
+       rc = register_netdevice_notifier(&aarp_notifier);
+       if (rc) {
+               del_timer_sync(&aarp_timer);
+               unregister_snap_client(aarp_dl);
+       }
+       return rc;
 }
 
 /* Remove the AARP entries associated with a device. */
index 795fbc6c06aa7a9e7078aafafad97024373afeb3..709d2542f7295ee71a5ddb201f81fed09ac669cb 100644 (file)
@@ -1904,9 +1904,6 @@ static unsigned char ddp_snap_id[] = { 0x08, 0x00, 0x07, 0x80, 0x9B };
 EXPORT_SYMBOL(atrtr_get_dev);
 EXPORT_SYMBOL(atalk_find_dev_addr);
 
-static const char atalk_err_snap[] __initconst =
-       KERN_CRIT "Unable to register DDP with SNAP.\n";
-
 /* Called by proto.c on kernel start up */
 static int __init atalk_init(void)
 {
@@ -1921,17 +1918,22 @@ static int __init atalk_init(void)
                goto out_proto;
 
        ddp_dl = register_snap_client(ddp_snap_id, atalk_rcv);
-       if (!ddp_dl)
-               printk(atalk_err_snap);
+       if (!ddp_dl) {
+               pr_crit("Unable to register DDP with SNAP.\n");
+               goto out_sock;
+       }
 
        dev_add_pack(&ltalk_packet_type);
        dev_add_pack(&ppptalk_packet_type);
 
        rc = register_netdevice_notifier(&ddp_notifier);
        if (rc)
-               goto out_sock;
+               goto out_snap;
+
+       rc = aarp_proto_init();
+       if (rc)
+               goto out_dev;
 
-       aarp_proto_init();
        rc = atalk_proc_init();
        if (rc)
                goto out_aarp;
@@ -1945,11 +1947,13 @@ out_proc:
        atalk_proc_exit();
 out_aarp:
        aarp_cleanup_module();
+out_dev:
        unregister_netdevice_notifier(&ddp_notifier);
-out_sock:
+out_snap:
        dev_remove_pack(&ppptalk_packet_type);
        dev_remove_pack(&ltalk_packet_type);
        unregister_snap_client(ddp_dl);
+out_sock:
        sock_unregister(PF_APPLETALK);
 out_proto:
        proto_unregister(&ddp_proto);
index 9d34de68571be8969ee7a57d9f2eb680777b1cea..22afa566cbce9cd6d58abe3ead14ffd7199fd18d 100644 (file)
@@ -502,6 +502,7 @@ static unsigned int br_nf_pre_routing(void *priv,
        nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
 
        skb->protocol = htons(ETH_P_IP);
+       skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4;
 
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
index 564710f88f938cb314f2f59177289ecb127454f4..e88d6641647bab45397f5206737b367ea60cb9b0 100644 (file)
@@ -235,6 +235,8 @@ unsigned int br_nf_pre_routing_ipv6(void *priv,
        nf_bridge->ipv6_daddr = ipv6_hdr(skb)->daddr;
 
        skb->protocol = htons(ETH_P_IPV6);
+       skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
+
        NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, state->net, state->sk, skb,
                skb->dev, NULL,
                br_nf_pre_routing_finish_ipv6);
index 7e71b0df1fbc9185b192a43427c7cb281b778ca1..3083988ce729dbe01771e9433b7de72e484394f9 100644 (file)
@@ -840,6 +840,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct ceph_bio_iter *it = &cursor->bio_iter;
+       struct page *page = bio_iter_page(it->bio, it->iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bio_iter_len(it->bio, it->iter));
@@ -851,7 +852,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done))
+       if (!bytes || (it->iter.bi_size && it->iter.bi_bvec_done &&
+                      page == bio_iter_page(it->bio, it->iter)))
                return false;   /* more bytes to process in this segment */
 
        if (!it->iter.bi_size) {
@@ -899,6 +901,7 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                                        size_t bytes)
 {
        struct bio_vec *bvecs = cursor->data->bvec_pos.bvecs;
+       struct page *page = bvec_iter_page(bvecs, cursor->bvec_iter);
 
        BUG_ON(bytes > cursor->resid);
        BUG_ON(bytes > bvec_iter_len(bvecs, cursor->bvec_iter));
@@ -910,7 +913,8 @@ static bool ceph_msg_data_bvecs_advance(struct ceph_msg_data_cursor *cursor,
                return false;   /* no more data */
        }
 
-       if (!bytes || cursor->bvec_iter.bi_bvec_done)
+       if (!bytes || (cursor->bvec_iter.bi_bvec_done &&
+                      page == bvec_iter_page(bvecs, cursor->bvec_iter)))
                return false;   /* more bytes to process in this segment */
 
        BUG_ON(cursor->last_piece);
index 78e22cea4cc79589e22781849a843397904e220c..da0a29f30885d1f54b975ecfa4f83b47c68be543 100644 (file)
@@ -3897,6 +3897,11 @@ static int devlink_nl_cmd_info_get_dumpit(struct sk_buff *msg,
                        continue;
                }
 
+               if (!devlink->ops->info_get) {
+                       idx++;
+                       continue;
+               }
+
                mutex_lock(&devlink->lock);
                err = devlink_nl_info_fill(msg, devlink, DEVLINK_CMD_INFO_GET,
                                           NETLINK_CB(cb->skb).portid,
index f274620945ff06085beaf411d7d6f9912ec4ba66..647c63a7b25b6745e75a812b65a4052f3c72b690 100644 (file)
@@ -1796,8 +1796,6 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
 
 BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
 {
-       sk = sk_to_full_sk(sk);
-
        return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
 }
 
@@ -5266,7 +5264,7 @@ static const struct bpf_func_proto bpf_sk_release_proto = {
        .func           = bpf_sk_release,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
-       .arg1_type      = ARG_PTR_TO_SOCKET,
+       .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
 };
 
 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
@@ -5407,8 +5405,6 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
 
 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
 {
-       sk = sk_to_full_sk(sk);
-
        if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
                return (unsigned long)sk;
 
@@ -5422,6 +5418,23 @@ static const struct bpf_func_proto bpf_tcp_sock_proto = {
        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
 };
 
+BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
+{
+       sk = sk_to_full_sk(sk);
+
+       if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
+               return (unsigned long)sk;
+
+       return (unsigned long)NULL;
+}
+
+static const struct bpf_func_proto bpf_get_listener_sock_proto = {
+       .func           = bpf_get_listener_sock,
+       .gpl_only       = false,
+       .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
+       .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
+};
+
 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
 {
        unsigned int iphdr_len;
@@ -5607,6 +5620,8 @@ cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 #ifdef CONFIG_INET
        case BPF_FUNC_tcp_sock:
                return &bpf_tcp_sock_proto;
+       case BPF_FUNC_get_listener_sock:
+               return &bpf_get_listener_sock_proto;
        case BPF_FUNC_skb_ecn_set_ce:
                return &bpf_skb_ecn_set_ce_proto;
 #endif
@@ -5702,6 +5717,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                return &bpf_sk_release_proto;
        case BPF_FUNC_tcp_sock:
                return &bpf_tcp_sock_proto;
+       case BPF_FUNC_get_listener_sock:
+               return &bpf_get_listener_sock_proto;
 #endif
        default:
                return bpf_base_func_proto(func_id);
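
A hedged sketch of the new helper from a cgroup egress program, as wired up above: for a not-yet-full socket (e.g. a request socket behind a listener), fetch the TCP_LISTEN parent; no bpf_sk_release() is needed on the returned pointer. Program layout and section name are illustrative only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("cgroup_skb/egress")
int egress(struct __sk_buff *skb)
{
        struct bpf_sock *sk = skb->sk;
        struct bpf_tcp_sock *ltp;
        struct bpf_sock *lsk;

        if (!sk)
                return 1;                       /* 1 == allow the packet */

        lsk = bpf_get_listener_sock(sk);
        if (!lsk)
                return 1;                       /* sk was not derived from a listener */

        ltp = bpf_tcp_sock(lsk);                /* e.g. to read listener counters */
        if (!ltp)
                return 1;

        return 1;
}

char _license[] SEC("license") = "GPL";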
index 4ff661f6f989ae10ca49a1e81c825be56683d026..f8f94303a1f57203eaa28b5ea459ac28c89e1b12 100644 (file)
@@ -928,6 +928,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
        if (error)
                return error;
 
+       dev_hold(queue->dev);
+
        if (dev->sysfs_rx_queue_group) {
                error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error) {
@@ -937,7 +939,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
        }
 
        kobject_uevent(kobj, KOBJ_ADD);
-       dev_hold(queue->dev);
 
        return error;
 }
@@ -1464,6 +1465,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
        if (error)
                return error;
 
+       dev_hold(queue->dev);
+
 #ifdef CONFIG_BQL
        error = sysfs_create_group(kobj, &dql_group);
        if (error) {
@@ -1473,7 +1476,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
 #endif
 
        kobject_uevent(kobj, KOBJ_ADD);
-       dev_hold(queue->dev);
 
        return 0;
 }
@@ -1745,16 +1747,20 @@ int netdev_register_kobject(struct net_device *ndev)
 
        error = device_add(dev);
        if (error)
-               return error;
+               goto error_put_device;
 
        error = register_queue_kobjects(ndev);
-       if (error) {
-               device_del(dev);
-               return error;
-       }
+       if (error)
+               goto error_device_del;
 
        pm_runtime_set_memalloc_noio(dev, true);
 
+       return 0;
+
+error_device_del:
+       device_del(dev);
+error_put_device:
+       put_device(dev);
        return error;
 }
 
index d5740bad5b1811cd42e44fb3b0da6edabbf18095..57d84e9b7b6fc820a4616e3ac326633f14e2fd95 100644 (file)
@@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
                newnp->ipv6_mc_list = NULL;
                newnp->ipv6_ac_list = NULL;
                newnp->ipv6_fl_list = NULL;
-               newnp->mcast_oif   = inet6_iif(skb);
-               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
+               newnp->mcast_oif   = inet_iif(skb);
+               newnp->mcast_hops  = ip_hdr(skb)->ttl;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
index 1059894a6f4c3f009a92b30fb257e6b35f3a4a26..4cb83fb69844354d6d5d6ad18d552060e9e28b84 100644 (file)
@@ -210,6 +210,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                psidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
                          ((srh->segments_left + 1) * sizeof(struct in6_addr));
                psid = skb_header_pointer(skb, psidoff, sizeof(_psid), &_psid);
+               if (!psid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_PSID,
                                ipv6_masked_addr_cmp(psid, &srhinfo->psid_msk,
                                                     &srhinfo->psid_addr)))
@@ -223,6 +225,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                nsidoff = srhoff + sizeof(struct ipv6_sr_hdr) +
                          ((srh->segments_left - 1) * sizeof(struct in6_addr));
                nsid = skb_header_pointer(skb, nsidoff, sizeof(_nsid), &_nsid);
+               if (!nsid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_NSID,
                                ipv6_masked_addr_cmp(nsid, &srhinfo->nsid_msk,
                                                     &srhinfo->nsid_addr)))
@@ -233,6 +237,8 @@ static bool srh1_mt6(const struct sk_buff *skb, struct xt_action_param *par)
        if (srhinfo->mt_flags & IP6T_SRH_LSID) {
                lsidoff = srhoff + sizeof(struct ipv6_sr_hdr);
                lsid = skb_header_pointer(skb, lsidoff, sizeof(_lsid), &_lsid);
+               if (!lsid)
+                       return false;
                if (NF_SRH_INVF(srhinfo, IP6T_SRH_INV_LSID,
                                ipv6_masked_addr_cmp(lsid, &srhinfo->lsid_msk,
                                                     &srhinfo->lsid_addr)))
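
All three hunks add the same guard. As a hedged sketch of the general rule (example_read_sid() is an illustrative name): skb_header_pointer() copies into the local buffer when the data is non-linear and returns NULL when the requested range lies beyond the packet, so the result must be checked before it is dereferenced.

#include <linux/skbuff.h>
#include <linux/in6.h>
#include <linux/string.h>

static bool example_read_sid(const struct sk_buff *skb, int offset,
                             struct in6_addr *out)
{
        struct in6_addr _sid;
        const struct in6_addr *sid;

        /* NULL if offset + sizeof(_sid) is past the end of the packet */
        sid = skb_header_pointer(skb, offset, sizeof(_sid), &_sid);
        if (!sid)
                return false;

        memcpy(out, sid, sizeof(*out));
        return true;
}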
index 4ef4bbdb49d4b203974bf95de47534921baed3f6..0302e0eb07af1d270a615bcadfcb9bc08ca61d6c 100644 (file)
@@ -1040,14 +1040,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
        struct rt6_info *nrt;
 
        if (!fib6_info_hold_safe(rt))
-               return NULL;
+               goto fallback;
 
        nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
-       if (nrt)
-               ip6_rt_copy_init(nrt, rt);
-       else
+       if (!nrt) {
                fib6_info_release(rt);
+               goto fallback;
+       }
 
+       ip6_rt_copy_init(nrt, rt);
+       return nrt;
+
+fallback:
+       nrt = dev_net(dev)->ipv6.ip6_null_entry;
+       dst_hold(&nrt->dst);
        return nrt;
 }
 
@@ -1096,10 +1102,6 @@ restart:
                dst_hold(&rt->dst);
        } else {
                rt = ip6_create_rt_rcu(f6i);
-               if (!rt) {
-                       rt = net->ipv6.ip6_null_entry;
-                       dst_hold(&rt->dst);
-               }
        }
 
        rcu_read_unlock();
index 57ef69a1088908fc624ecfca99a728fa296ae0bf..44d431849d391d6903d263ae547fc9bed1e67aa7 100644 (file)
@@ -1110,11 +1110,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
                newnp->ipv6_fl_list = NULL;
                newnp->pktoptions  = NULL;
                newnp->opt         = NULL;
-               newnp->mcast_oif   = tcp_v6_iif(skb);
-               newnp->mcast_hops  = ipv6_hdr(skb)->hop_limit;
-               newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
+               newnp->mcast_oif   = inet_iif(skb);
+               newnp->mcast_hops  = ip_hdr(skb)->ttl;
+               newnp->rcv_flowinfo = 0;
                if (np->repflow)
-                       newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
+                       newnp->flow_label = 0;
 
                /*
                 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
index dda8930f20e790c77c808674f6e35b133bb5657a..f3a8557494d60e4d1ffe1f89fa32ea00c13eabab 100644 (file)
@@ -140,9 +140,15 @@ static int mpls_xmit(struct sk_buff *skb)
        if (rt)
                err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
                                 skb);
-       else if (rt6)
-               err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
-                                skb);
+       else if (rt6) {
+               if (ipv6_addr_v4mapped(&rt6->rt6i_gateway)) {
+                       /* 6PE (RFC 4798) */
+                       err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt6->rt6i_gateway.s6_addr32[3],
+                                        skb);
+               } else
+                       err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
+                                        skb);
+       }
        if (err)
                net_dbg_ratelimited("%s: packet transmission failed: %d\n",
                                    __func__, err);
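
For context on the 6PE branch, a hedged sketch (example_mapped_v4() is an illustrative name): a v4-mapped IPv6 address of the form ::ffff:a.b.c.d keeps the IPv4 address in its last 32 bits, which is why the ARP neighbour table can be used with &rt6->rt6i_gateway.s6_addr32[3] above.

#include <net/ipv6.h>

static __be32 example_mapped_v4(const struct in6_addr *addr)
{
        if (!ipv6_addr_v4mapped(addr))
                return 0;                       /* not a v4-mapped address */
        return addr->s6_addr32[3];              /* the embedded IPv4 address */
}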
index 5d782445d2fcf629367777f415e000eb326eab2a..bad17bba8ba786f589212a2575d346850bab6300 100644 (file)
@@ -251,6 +251,10 @@ static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
        }
 
        attr = nla_nest_start(skb, NCSI_ATTR_PACKAGE_LIST);
+       if (!attr) {
+               rc = -EMSGSIZE;
+               goto err;
+       }
        rc = ncsi_write_package_info(skb, ndp, package->id);
        if (rc) {
                nla_nest_cancel(skb, attr);
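
The openvswitch hunks below add the same kind of check. As a hedged sketch of the pattern (example_put_nested() and the attribute types are illustrative): nla_nest_start() returns NULL when the message has no tailroom left, and the only sensible reaction is -EMSGSIZE rather than dereferencing the result.

#include <net/netlink.h>

static int example_put_nested(struct sk_buff *skb, int nest_type,
                              int attr_type, u32 value)
{
        struct nlattr *nest;

        nest = nla_nest_start(skb, nest_type);
        if (!nest)
                return -EMSGSIZE;               /* no room for the nest header */

        if (nla_put_u32(skb, attr_type, value)) {
                nla_nest_cancel(skb, nest);     /* roll back the partial nest */
                return -EMSGSIZE;
        }

        nla_nest_end(skb, nest);
        return 0;
}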
index d43ffb09939bd3641b213b826a3e0229bcdbb550..6548271209a05c2fce99628c9b23d2cedbf8a087 100644 (file)
@@ -1007,6 +1007,7 @@ config NETFILTER_XT_TARGET_TEE
        depends on NETFILTER_ADVANCED
        depends on IPV6 || IPV6=n
        depends on !NF_CONNTRACK || NF_CONNTRACK
+       depends on IP6_NF_IPTABLES || !IP6_NF_IPTABLES
        select NF_DUP_IPV4
        select NF_DUP_IPV6 if IP6_NF_IPTABLES
        ---help---
index f067c6b508572a9ab31bc4b3b6281e1482ffdc6d..39fcc1ed18f3501b3120fc9aeffbe44e27fda933 100644 (file)
@@ -20,9 +20,9 @@
 #include <linux/udp.h>
 #include <linux/tcp.h>
 #include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
 
-#include <net/route.h>
-#include <net/ip6_route.h>
 #include <net/netfilter/nf_conntrack.h>
 #include <net/netfilter/nf_conntrack_core.h>
 #include <net/netfilter/nf_conntrack_expect.h>
@@ -871,38 +871,33 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
        } else if (sip_external_media) {
                struct net_device *dev = skb_dst(skb)->dev;
                struct net *net = dev_net(dev);
-               struct rtable *rt;
-               struct flowi4 fl4 = {};
-#if IS_ENABLED(CONFIG_IPV6)
-               struct flowi6 fl6 = {};
-#endif
+               struct flowi fl;
                struct dst_entry *dst = NULL;
 
+               memset(&fl, 0, sizeof(fl));
+
                switch (nf_ct_l3num(ct)) {
                        case NFPROTO_IPV4:
-                               fl4.daddr = daddr->ip;
-                               rt = ip_route_output_key(net, &fl4);
-                               if (!IS_ERR(rt))
-                                       dst = &rt->dst;
+                               fl.u.ip4.daddr = daddr->ip;
+                               nf_ip_route(net, &dst, &fl, false);
                                break;
 
-#if IS_ENABLED(CONFIG_IPV6)
                        case NFPROTO_IPV6:
-                               fl6.daddr = daddr->in6;
-                               dst = ip6_route_output(net, NULL, &fl6);
-                               if (dst->error) {
-                                       dst_release(dst);
-                                       dst = NULL;
-                               }
+                               fl.u.ip6.daddr = daddr->in6;
+                               nf_ip6_route(net, &dst, &fl, false);
                                break;
-#endif
                }
 
                /* Don't predict any conntracks when media endpoint is reachable
                 * through the same interface as the signalling peer.
                 */
-               if (dst && dst->dev == dev)
-                       return NF_ACCEPT;
+               if (dst) {
+                       bool external_media = (dst->dev == dev);
+
+                       dst_release(dst);
+                       if (external_media)
+                               return NF_ACCEPT;
+               }
        }
 
        /* We need to check whether the registration exists before attempting
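
The key point of the rewrite is the added release: both nf_ip_route() and nf_ip6_route() hand back a referenced dst, so it must be dropped once the interface comparison is done, on every path. A hedged sketch (example_egress_matches() is an illustrative name):

#include <net/dst.h>
#include <linux/netdevice.h>

static bool example_egress_matches(struct dst_entry *dst,
                                   const struct net_device *dev)
{
        bool match;

        if (!dst)
                return false;

        match = (dst->dev == dev);
        dst_release(dst);               /* drop the routing lookup's reference */
        return match;
}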
index 513f931186043f2ded3f1844374decd79768c44b..ef7772e976cc802afc64ea25d28f1fbecde773be 100644 (file)
@@ -2806,8 +2806,11 @@ err2:
        nf_tables_rule_release(&ctx, rule);
 err1:
        for (i = 0; i < n; i++) {
-               if (info[i].ops != NULL)
+               if (info[i].ops) {
                        module_put(info[i].ops->type->owner);
+                       if (info[i].ops->type->release_ops)
+                               info[i].ops->type->release_ops(info[i].ops);
+               }
        }
        kvfree(info);
        return err;
index 457a9ceb46af2061546da95f46d05c3578c1826a..8dfa798ea68330645c1dea590d05e98539ca2aa7 100644 (file)
@@ -65,21 +65,34 @@ nla_put_failure:
        return -1;
 }
 
-static void nft_objref_destroy(const struct nft_ctx *ctx,
-                              const struct nft_expr *expr)
+static void nft_objref_deactivate(const struct nft_ctx *ctx,
+                                 const struct nft_expr *expr,
+                                 enum nft_trans_phase phase)
 {
        struct nft_object *obj = nft_objref_priv(expr);
 
+       if (phase == NFT_TRANS_COMMIT)
+               return;
+
        obj->use--;
 }
 
+static void nft_objref_activate(const struct nft_ctx *ctx,
+                               const struct nft_expr *expr)
+{
+       struct nft_object *obj = nft_objref_priv(expr);
+
+       obj->use++;
+}
+
 static struct nft_expr_type nft_objref_type;
 static const struct nft_expr_ops nft_objref_ops = {
        .type           = &nft_objref_type,
        .size           = NFT_EXPR_SIZE(sizeof(struct nft_object *)),
        .eval           = nft_objref_eval,
        .init           = nft_objref_init,
-       .destroy        = nft_objref_destroy,
+       .activate       = nft_objref_activate,
+       .deactivate     = nft_objref_deactivate,
        .dump           = nft_objref_dump,
 };
 
index f8092926f704add7a7cc6843b89d9a509a5db046..a340cd8a751b483766e4ed7274ce0fb2c2b193e2 100644 (file)
@@ -233,5 +233,5 @@ module_exit(nft_redir_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo@debian.org>");
-MODULE_ALIAS_NFT_AF_EXPR(AF_INET4, "redir");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "redir");
 MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "redir");
index fa61208371f8b222ceb43388773f5c19c691d764..321a0036fdf5b95cc8d356b63ad47fc76826498e 100644 (file)
@@ -308,10 +308,6 @@ static void *nft_rbtree_deactivate(const struct net *net,
                else if (d > 0)
                        parent = parent->rb_right;
                else {
-                       if (!nft_set_elem_active(&rbe->ext, genmask)) {
-                               parent = parent->rb_left;
-                               continue;
-                       }
                        if (nft_rbtree_interval_end(rbe) &&
                            !nft_rbtree_interval_end(this)) {
                                parent = parent->rb_left;
@@ -320,6 +316,9 @@ static void *nft_rbtree_deactivate(const struct net *net,
                                   nft_rbtree_interval_end(this)) {
                                parent = parent->rb_right;
                                continue;
+                       } else if (!nft_set_elem_active(&rbe->ext, genmask)) {
+                               parent = parent->rb_left;
+                               continue;
                        }
                        nft_rbtree_flush(net, set, rbe);
                        return rbe;
index 25eeb6d2a75a69059f387be103345e844284f743..f0ec068e1d02fc0ebd39c2b1ecd935a301647ab1 100644 (file)
@@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
                               start, end + 1, GFP_KERNEL);
        if (family->id < 0) {
                err = family->id;
-               goto errout_locked;
+               goto errout_free;
        }
 
        err = genl_validate_assign_mc_groups(family);
@@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
 
 errout_remove:
        idr_remove(&genl_fam_idr, family->id);
+errout_free:
        kfree(family->attrbuf);
 errout_locked:
        genl_unlock_all();
index ae296273ce3db96cdaeafba66a7ff460d8a59794..17dcd0b5eb3287989d5a72a19194bc5674f3cb1e 100644 (file)
@@ -726,6 +726,10 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        llcp_sock->service_name = kmemdup(addr->service_name,
                                          llcp_sock->service_name_len,
                                          GFP_KERNEL);
+       if (!llcp_sock->service_name) {
+               ret = -ENOMEM;
+               goto sock_llcp_release;
+       }
 
        nfc_llcp_sock_link(&local->connecting_sockets, sk);
 
@@ -745,10 +749,11 @@ static int llcp_sock_connect(struct socket *sock, struct sockaddr *_addr,
        return ret;
 
 sock_unlink:
-       nfc_llcp_put_ssap(local, llcp_sock->ssap);
-
        nfc_llcp_sock_unlink(&local->connecting_sockets, sk);
 
+sock_llcp_release:
+       nfc_llcp_put_ssap(local, llcp_sock->ssap);
+
 put_dev:
        nfc_put_device(dev);
 
index 6679e96ab1dcdf8761845b863c39e1b6aac20d2e..9dd158ab51b310e28354237ff3bcd101a8b829f5 100644 (file)
@@ -448,6 +448,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
                             0, upcall_info->cmd);
+       if (!upcall) {
+               err = -EINVAL;
+               goto out;
+       }
        upcall->dp_ifindex = dp_ifindex;
 
        err = ovs_nla_put_key(key, key, OVS_PACKET_ATTR_KEY, false, user_skb);
@@ -460,6 +464,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->egress_tun_info) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_EGRESS_TUN_KEY);
+               if (!nla) {
+                       err = -EMSGSIZE;
+                       goto out;
+               }
                err = ovs_nla_put_tunnel_info(user_skb,
                                              upcall_info->egress_tun_info);
                BUG_ON(err);
@@ -468,6 +476,10 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
 
        if (upcall_info->actions_len) {
                nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_ACTIONS);
+               if (!nla) {
+                       err = -EMSGSIZE;
+                       goto out;
+               }
                err = ovs_nla_put_actions(upcall_info->actions,
                                          upcall_info->actions_len,
                                          user_skb);
index 8376bc1c1508170aa333f8feaa8a154908e0eb3c..9419c5cf4de5e8443fd760c0f73612ce691483a9 100644 (file)
@@ -1852,7 +1852,8 @@ oom:
 
 static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
 {
-       if (!skb->protocol && sock->type == SOCK_RAW) {
+       if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
+           sock->type == SOCK_RAW) {
                skb_reset_mac_header(skb);
                skb->protocol = dev_parse_header_protocol(skb);
        }
@@ -3243,7 +3244,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
        }
 
        mutex_lock(&net->packet.sklist_lock);
-       sk_add_node_rcu(sk, &net->packet.sklist);
+       sk_add_node_tail_rcu(sk, &net->packet.sklist);
        mutex_unlock(&net->packet.sklist_lock);
 
        preempt_disable();
@@ -4209,7 +4210,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
        struct pgv *pg_vec;
        int i;
 
-       pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
+       pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
        if (unlikely(!pg_vec))
                goto out;
 
index 7ca57741b2fbbbc8f5ccf139f5ffbe56b969c458..7849f286bb9331dbfce00e58cbe0c325a36894f5 100644 (file)
@@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
        struct sk_buff *skb;
        unsigned char  *dptr;
        unsigned char  lci1, lci2;
-       char buffer[100];
-       int len, faclen = 0;
+       int maxfaclen = 0;
+       int len, faclen;
+       int reserve;
 
-       len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
+       reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
+       len = ROSE_MIN_LEN;
 
        switch (frametype) {
        case ROSE_CALL_REQUEST:
                len   += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
-               faclen = rose_create_facilities(buffer, rose);
-               len   += faclen;
+               maxfaclen = 256;
                break;
        case ROSE_CALL_ACCEPTED:
        case ROSE_CLEAR_REQUEST:
@@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
                break;
        }
 
-       if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
+       skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
+       if (!skb)
                return;
 
        /*
         *      Space for AX.25 header and PID.
         */
-       skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
+       skb_reserve(skb, reserve);
 
-       dptr = skb_put(skb, skb_tailroom(skb));
+       dptr = skb_put(skb, len);
 
        lci1 = (rose->lci >> 8) & 0x0F;
        lci2 = (rose->lci >> 0) & 0xFF;
@@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
                dptr   += ROSE_ADDR_LEN;
                memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
                dptr   += ROSE_ADDR_LEN;
-               memcpy(dptr, buffer, faclen);
+               faclen = rose_create_facilities(dptr, rose);
+               skb_put(skb, faclen);
                dptr   += faclen;
                break;
 
index 736aa92811004cfe5d157abd4827710783f8d57c..004c762c2e8d063cfda32c0f93325fb779f08737 100644 (file)
@@ -335,7 +335,6 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
        struct kvec iov[2];
        rxrpc_serial_t serial;
        size_t len;
-       bool lost = false;
        int ret, opt;
 
        _enter(",{%d}", skb->len);
@@ -393,14 +392,14 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
                static int lose;
                if ((lose++ & 7) == 7) {
                        ret = 0;
-                       lost = true;
+                       trace_rxrpc_tx_data(call, sp->hdr.seq, serial,
+                                           whdr.flags, retrans, true);
+                       goto done;
                }
        }
 
-       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags,
-                           retrans, lost);
-       if (lost)
-               goto done;
+       trace_rxrpc_tx_data(call, sp->hdr.seq, serial, whdr.flags, retrans,
+                           false);
 
        /* send the packet with the don't fragment bit set if we currently
         * think it's small enough */
index 1b9afdee5ba976ba64200d8f85050cf053b7d65c..5c02ad97ef239a5eb22df8b22be80010a77b0151 100644 (file)
@@ -358,8 +358,7 @@ config NET_SCH_PIE
        help
          Say Y here if you want to use the Proportional Integral controller
          Enhanced scheduler packet scheduling algorithm.
-         For more information, please see
-         http://tools.ietf.org/html/draft-pan-tsvwg-pie-00
+         For more information, please see https://tools.ietf.org/html/rfc8033
 
          To compile this driver as a module, choose M here: the module
          will be called sch_pie.
index aecf1bf233c8362673812b5ab212f32e5f868a5b..5a87e271d35a2416b3589888bcfadee0c31b2142 100644 (file)
 #include <net/act_api.h>
 #include <net/netlink.h>
 
-static int tcf_action_goto_chain_init(struct tc_action *a, struct tcf_proto *tp)
-{
-       u32 chain_index = a->tcfa_action & TC_ACT_EXT_VAL_MASK;
-
-       if (!tp)
-               return -EINVAL;
-       a->goto_chain = tcf_chain_get_by_act(tp->chain->block, chain_index);
-       if (!a->goto_chain)
-               return -ENOMEM;
-       return 0;
-}
-
-static void tcf_action_goto_chain_fini(struct tc_action *a)
-{
-       tcf_chain_put_by_act(a->goto_chain);
-}
-
 static void tcf_action_goto_chain_exec(const struct tc_action *a,
                                       struct tcf_result *res)
 {
-       const struct tcf_chain *chain = a->goto_chain;
+       const struct tcf_chain *chain = rcu_dereference_bh(a->goto_chain);
 
        res->goto_tp = rcu_dereference_bh(chain->filter_chain);
 }
@@ -71,6 +54,51 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
                call_rcu(&old->rcu, tcf_free_cookie_rcu);
 }
 
+int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
+                            struct tcf_chain **newchain,
+                            struct netlink_ext_ack *extack)
+{
+       int opcode = TC_ACT_EXT_OPCODE(action), ret = -EINVAL;
+       u32 chain_index;
+
+       if (!opcode)
+               ret = action > TC_ACT_VALUE_MAX ? -EINVAL : 0;
+       else if (opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC)
+               ret = 0;
+       if (ret) {
+               NL_SET_ERR_MSG(extack, "invalid control action");
+               goto end;
+       }
+
+       if (TC_ACT_EXT_CMP(action, TC_ACT_GOTO_CHAIN)) {
+               chain_index = action & TC_ACT_EXT_VAL_MASK;
+               if (!tp || !newchain) {
+                       ret = -EINVAL;
+                       NL_SET_ERR_MSG(extack,
+                                      "can't goto NULL proto/chain");
+                       goto end;
+               }
+               *newchain = tcf_chain_get_by_act(tp->chain->block, chain_index);
+               if (!*newchain) {
+                       ret = -ENOMEM;
+                       NL_SET_ERR_MSG(extack,
+                                      "can't allocate goto_chain");
+               }
+       }
+end:
+       return ret;
+}
+EXPORT_SYMBOL(tcf_action_check_ctrlact);
+
+struct tcf_chain *tcf_action_set_ctrlact(struct tc_action *a, int action,
+                                        struct tcf_chain *goto_chain)
+{
+       a->tcfa_action = action;
+       rcu_swap_protected(a->goto_chain, goto_chain, 1);
+       return goto_chain;
+}
+EXPORT_SYMBOL(tcf_action_set_ctrlact);
+
 /* XXX: For standalone actions, we don't need a RCU grace period either, because
  * actions are always connected to filters and filters are already destroyed in
  * RCU callbacks, so after a RCU grace period actions are already disconnected
@@ -78,13 +106,15 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie,
  */
 static void free_tcf(struct tc_action *p)
 {
+       struct tcf_chain *chain = rcu_dereference_protected(p->goto_chain, 1);
+
        free_percpu(p->cpu_bstats);
        free_percpu(p->cpu_bstats_hw);
        free_percpu(p->cpu_qstats);
 
        tcf_set_action_cookie(&p->act_cookie, NULL);
-       if (p->goto_chain)
-               tcf_action_goto_chain_fini(p);
+       if (chain)
+               tcf_chain_put_by_act(chain);
 
        kfree(p);
 }
@@ -654,6 +684,10 @@ repeat:
                                        return TC_ACT_OK;
                        }
                } else if (TC_ACT_EXT_CMP(ret, TC_ACT_GOTO_CHAIN)) {
+                       if (unlikely(!rcu_access_pointer(a->goto_chain))) {
+                               net_warn_ratelimited("can't go to NULL chain!\n");
+                               return TC_ACT_SHOT;
+                       }
                        tcf_action_goto_chain_exec(a, res);
                }
 
@@ -800,15 +834,6 @@ static struct tc_cookie *nla_memdup_cookie(struct nlattr **tb)
        return c;
 }
 
-static bool tcf_action_valid(int action)
-{
-       int opcode = TC_ACT_EXT_OPCODE(action);
-
-       if (!opcode)
-               return action <= TC_ACT_VALUE_MAX;
-       return opcode <= TC_ACT_EXT_OPCODE_MAX || action == TC_ACT_UNSPEC;
-}
-
 struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
                                    struct nlattr *nla, struct nlattr *est,
                                    char *name, int ovr, int bind,
@@ -890,10 +915,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        /* backward compatibility for policer */
        if (name == NULL)
                err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, &a, ovr, bind,
-                               rtnl_held, extack);
+                               rtnl_held, tp, extack);
        else
                err = a_o->init(net, nla, est, &a, ovr, bind, rtnl_held,
-                               extack);
+                               tp, extack);
        if (err < 0)
                goto err_mod;
 
@@ -907,18 +932,10 @@ struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
        if (err != ACT_P_CREATED)
                module_put(a_o->owner);
 
-       if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN)) {
-               err = tcf_action_goto_chain_init(a, tp);
-               if (err) {
-                       tcf_action_destroy_1(a, bind);
-                       NL_SET_ERR_MSG(extack, "Failed to init TC action chain");
-                       return ERR_PTR(err);
-               }
-       }
-
-       if (!tcf_action_valid(a->tcfa_action)) {
+       if (TC_ACT_EXT_CMP(a->tcfa_action, TC_ACT_GOTO_CHAIN) &&
+           !rcu_access_pointer(a->goto_chain)) {
                tcf_action_destroy_1(a, bind);
-               NL_SET_ERR_MSG(extack, "Invalid control action value");
+               NL_SET_ERR_MSG(extack, "can't use goto chain with NULL chain");
                return ERR_PTR(-EINVAL);
        }
 
index aa5c38d11a3079644d36329c8b4637e0bdfaa5c6..3841156aa09f778c285765b51342cd5d218a34ac 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_bpf.h>
 #include <net/tc_act/tc_bpf.h>
@@ -278,10 +279,11 @@ static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
 static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **act,
                        int replace, int bind, bool rtnl_held,
-                       struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, bpf_net_id);
        struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_bpf_cfg cfg, old;
        struct tc_act_bpf *parm;
        struct tcf_bpf *prog;
@@ -323,12 +325,16 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
                return ret;
        }
 
+       ret = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (ret < 0)
+               goto release_idr;
+
        is_bpf = tb[TCA_ACT_BPF_OPS_LEN] && tb[TCA_ACT_BPF_OPS];
        is_ebpf = tb[TCA_ACT_BPF_FD];
 
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf)) {
                ret = -EINVAL;
-               goto out;
+               goto put_chain;
        }
 
        memset(&cfg, 0, sizeof(cfg));
@@ -336,7 +342,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        ret = is_bpf ? tcf_bpf_init_from_ops(tb, &cfg) :
                       tcf_bpf_init_from_efd(tb, &cfg);
        if (ret < 0)
-               goto out;
+               goto put_chain;
 
        prog = to_bpf(*act);
 
@@ -350,10 +356,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        if (cfg.bpf_num_ops)
                prog->bpf_num_ops = cfg.bpf_num_ops;
 
-       prog->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*act, parm->action, goto_ch);
        rcu_assign_pointer(prog->filter, cfg.filter);
        spin_unlock_bh(&prog->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
        if (res == ACT_P_CREATED) {
                tcf_idr_insert(tn, *act);
        } else {
@@ -363,9 +372,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
        }
 
        return res;
-out:
-       tcf_idr_release(*act, bind);
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
+release_idr:
+       tcf_idr_release(*act, bind);
        return ret;
 }
 
index 5d24993cccfebead613c7dd15bb41a1087c8024e..32ae0cd6e31c67e36793081ac11371c2250eb0f1 100644 (file)
@@ -21,6 +21,7 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 #include <uapi/linux/tc_act/tc_connmark.h>
 #include <net/tc_act/tc_connmark.h>
 
@@ -97,13 +98,15 @@ static const struct nla_policy connmark_policy[TCA_CONNMARK_MAX + 1] = {
 static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                             struct nlattr *est, struct tc_action **a,
                             int ovr, int bind, bool rtnl_held,
+                            struct tcf_proto *tp,
                             struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, connmark_net_id);
        struct nlattr *tb[TCA_CONNMARK_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_connmark_info *ci;
        struct tc_connmark *parm;
-       int ret = 0;
+       int ret = 0, err;
 
        if (!nla)
                return -EINVAL;
@@ -128,7 +131,11 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                }
 
                ci = to_connmark(*a);
-               ci->tcf_action = parm->action;
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
+               tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ci->net = net;
                ci->zone = parm->zone;
 
@@ -142,15 +149,24 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
                        tcf_idr_release(*a, bind);
                        return -EEXIST;
                }
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
                /* replacing action and zone */
                spin_lock_bh(&ci->tcf_lock);
-               ci->tcf_action = parm->action;
+               goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ci->zone = parm->zone;
                spin_unlock_bh(&ci->tcf_lock);
+               if (goto_ch)
+                       tcf_chain_put_by_act(goto_ch);
                ret = 0;
        }
 
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static inline int tcf_connmark_dump(struct sk_buff *skb, struct tc_action *a,
index c79aca29505e33a44b7402b3ccc1ccaf8827b280..0c77e7bdf6d5d282965eb3970f3b8934e570e0d4 100644 (file)
@@ -33,6 +33,7 @@
 #include <net/sctp/checksum.h>
 
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_csum.h>
 #include <net/tc_act/tc_csum.h>
@@ -46,12 +47,13 @@ static struct tc_action_ops act_csum_ops;
 
 static int tcf_csum_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a, int ovr,
-                        int bind, bool rtnl_held,
+                        int bind, bool rtnl_held, struct tcf_proto *tp,
                         struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, csum_net_id);
        struct tcf_csum_params *params_new;
        struct nlattr *tb[TCA_CSUM_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_csum *parm;
        struct tcf_csum *p;
        int ret = 0, err;
@@ -87,21 +89,27 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
                return err;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        p = to_tcf_csum(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
        params_new->update_flags = parm->update_flags;
 
        spin_lock_bh(&p->tcf_lock);
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(p->params, params_new,
                           lockdep_is_held(&p->tcf_lock));
        spin_unlock_bh(&p->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (params_new)
                kfree_rcu(params_new, rcu);
 
@@ -109,6 +117,12 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
                tcf_idr_insert(tn, *a);
 
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 /**
index 93da0004e9f415e9eb8439eeacdc0ae73f5783fd..e540e31069d746106eb82c5ea3c2f99f9438cbf4 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <linux/tc_act/tc_gact.h>
 #include <net/tc_act/tc_gact.h>
 
@@ -57,10 +58,11 @@ static const struct nla_policy gact_policy[TCA_GACT_MAX + 1] = {
 static int tcf_gact_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, gact_net_id);
        struct nlattr *tb[TCA_GACT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_gact *parm;
        struct tcf_gact *gact;
        int ret = 0;
@@ -116,10 +118,13 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
                return err;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
        gact = to_gact(*a);
 
        spin_lock_bh(&gact->tcf_lock);
-       gact->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 #ifdef CONFIG_GACT_PROB
        if (p_parm) {
                gact->tcfg_paction = p_parm->paction;
@@ -133,9 +138,15 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
 #endif
        spin_unlock_bh(&gact->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_gact_act(struct sk_buff *skb, const struct tc_action *a,
index 9b1f2b3990eedeeca44ba6ad5c7e334043727dd1..31c6ffb6abe7c607972de7b80fe77f03f8782723 100644 (file)
@@ -29,6 +29,7 @@
 #include <net/net_namespace.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 #include <uapi/linux/tc_act/tc_ife.h>
 #include <net/tc_act/tc_ife.h>
 #include <linux/etherdevice.h>
@@ -469,11 +470,12 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
 static int tcf_ife_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a,
                        int ovr, int bind, bool rtnl_held,
-                       struct netlink_ext_ack *extack)
+                       struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, ife_net_id);
        struct nlattr *tb[TCA_IFE_MAX + 1];
        struct nlattr *tb2[IFE_META_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_ife_params *p;
        struct tcf_ife_info *ife;
        u16 ife_type = ETH_P_IFE;
@@ -531,6 +533,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        }
 
        ife = to_ife(*a);
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        p->flags = parm->flags;
 
        if (parm->flags & IFE_ENCODE) {
@@ -563,13 +569,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
        if (tb[TCA_IFE_METALST]) {
                err = nla_parse_nested(tb2, IFE_META_MAX, tb[TCA_IFE_METALST],
                                       NULL, NULL);
-               if (err) {
-metadata_parse_err:
-                       tcf_idr_release(*a, bind);
-                       kfree(p);
-                       return err;
-               }
-
+               if (err)
+                       goto metadata_parse_err;
                err = populate_metalist(ife, tb2, exists, rtnl_held);
                if (err)
                        goto metadata_parse_err;
@@ -581,21 +582,20 @@ metadata_parse_err:
                 * going to bail out
                 */
                err = use_all_metadata(ife, exists);
-               if (err) {
-                       tcf_idr_release(*a, bind);
-                       kfree(p);
-                       return err;
-               }
+               if (err)
+                       goto metadata_parse_err;
        }
 
        if (exists)
                spin_lock_bh(&ife->tcf_lock);
-       ife->tcf_action = parm->action;
        /* protected by tcf_lock when modifying existing action */
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(ife->params, p, 1);
 
        if (exists)
                spin_unlock_bh(&ife->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);
 
@@ -603,6 +603,13 @@ metadata_parse_err:
                tcf_idr_insert(tn, *a);
 
        return ret;
+metadata_parse_err:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       kfree(p);
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
index 98f5b6ea77b46ea7a55e1c325fd60542020b2165..04a0b5c611943a4e10bfa0855f1c3928e4e141de 100644 (file)
@@ -97,7 +97,8 @@ static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
 
 static int __tcf_ipt_init(struct net *net, unsigned int id, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
-                         const struct tc_action_ops *ops, int ovr, int bind)
+                         const struct tc_action_ops *ops, int ovr, int bind,
+                         struct tcf_proto *tp)
 {
        struct tc_action_net *tn = net_generic(net, id);
        struct nlattr *tb[TCA_IPT_MAX + 1];
@@ -205,20 +206,20 @@ err1:
 
 static int tcf_ipt_init(struct net *net, struct nlattr *nla,
                        struct nlattr *est, struct tc_action **a, int ovr,
-                       int bind, bool rtnl_held,
+                       int bind, bool rtnl_held, struct tcf_proto *tp,
                        struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, ipt_net_id, nla, est, a, &act_ipt_ops, ovr,
-                             bind);
+                             bind, tp);
 }
 
 static int tcf_xt_init(struct net *net, struct nlattr *nla,
                       struct nlattr *est, struct tc_action **a, int ovr,
-                      int bind, bool unlocked,
+                      int bind, bool unlocked, struct tcf_proto *tp,
                       struct netlink_ext_ack *extack)
 {
        return __tcf_ipt_init(net, xt_net_id, nla, est, a, &act_xt_ops, ovr,
-                             bind);
+                             bind, tp);
 }
 
 static int tcf_ipt_act(struct sk_buff *skb, const struct tc_action *a,
index 6692fd0546177347a70123a959156d1120eace90..17cc6bd4c57c3a6f12786c3d1109c6e48af185dd 100644 (file)
@@ -94,10 +94,12 @@ static struct tc_action_ops act_mirred_ops;
 static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, mirred_net_id);
        struct nlattr *tb[TCA_MIRRED_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        bool mac_header_xmit = false;
        struct tc_mirred *parm;
        struct tcf_mirred *m;
@@ -157,18 +159,23 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+
        m = to_mirred(*a);
+       if (ret == ACT_P_CREATED)
+               INIT_LIST_HEAD(&m->tcfm_list);
+
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        spin_lock_bh(&m->tcf_lock);
-       m->tcf_action = parm->action;
-       m->tcfm_eaction = parm->eaction;
 
        if (parm->ifindex) {
                dev = dev_get_by_index(net, parm->ifindex);
                if (!dev) {
                        spin_unlock_bh(&m->tcf_lock);
-                       tcf_idr_release(*a, bind);
-                       return -ENODEV;
+                       err = -ENODEV;
+                       goto put_chain;
                }
                mac_header_xmit = dev_is_mac_header_xmit(dev);
                rcu_swap_protected(m->tcfm_dev, dev,
@@ -177,7 +184,11 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
                        dev_put(dev);
                m->tcfm_mac_header_xmit = mac_header_xmit;
        }
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
+       m->tcfm_eaction = parm->eaction;
        spin_unlock_bh(&m->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED) {
                spin_lock(&mirred_list_lock);
@@ -188,6 +199,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
        }
 
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
index 543eab9193f17ca94756bff060fee937078a1e21..e91bb8eb81ec5e2e7dfe86d2832cdb6530f8e327 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/tc_act/tc_nat.h>
 #include <net/act_api.h>
+#include <net/pkt_cls.h>
 #include <net/icmp.h>
 #include <net/ip.h>
 #include <net/netlink.h>
@@ -38,10 +39,12 @@ static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
 
 static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
                        struct tc_action **a, int ovr, int bind,
-                       bool rtnl_held, struct netlink_ext_ack *extack)
+                       bool rtnl_held, struct tcf_proto *tp,
+                       struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, nat_net_id);
        struct nlattr *tb[TCA_NAT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_nat *parm;
        int ret = 0, err;
        struct tcf_nat *p;
@@ -76,6 +79,9 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        } else {
                return err;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
        p = to_tcf_nat(*a);
 
        spin_lock_bh(&p->tcf_lock);
@@ -84,13 +90,18 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
        p->mask = parm->mask;
        p->flags = parm->flags;
 
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        spin_unlock_bh(&p->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
        return ret;
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_nat_act(struct sk_buff *skb, const struct tc_action *a,
index a80373878df769d180a6c34a9c6fdda4e846efd8..287793abfaf9bae9aba9c4f23552890c795010c1 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_pedit.h>
 #include <uapi/linux/tc_act/tc_pedit.h>
+#include <net/pkt_cls.h>
 
 static unsigned int pedit_net_id;
 static struct tc_action_ops act_pedit_ops;
@@ -138,10 +139,11 @@ nla_failure:
 static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                          struct nlattr *est, struct tc_action **a,
                          int ovr, int bind, bool rtnl_held,
-                         struct netlink_ext_ack *extack)
+                         struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, pedit_net_id);
        struct nlattr *tb[TCA_PEDIT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_pedit_key *keys = NULL;
        struct tcf_pedit_key_ex *keys_ex;
        struct tc_pedit *parm;
@@ -205,6 +207,11 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                goto out_free;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0) {
+               ret = err;
+               goto out_release;
+       }
        p = to_pedit(*a);
        spin_lock_bh(&p->tcf_lock);
 
@@ -214,7 +221,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
                if (!keys) {
                        spin_unlock_bh(&p->tcf_lock);
                        ret = -ENOMEM;
-                       goto out_release;
+                       goto put_chain;
                }
                kfree(p->tcfp_keys);
                p->tcfp_keys = keys;
@@ -223,16 +230,21 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
        memcpy(p->tcfp_keys, parm->keys, ksize);
 
        p->tcfp_flags = parm->flags;
-       p->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
 
        kfree(p->tcfp_keys_ex);
        p->tcfp_keys_ex = keys_ex;
 
        spin_unlock_bh(&p->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 out_release:
        tcf_idr_release(*a, bind);
 out_free:
index 8271a6263824bf53aaa92f97a35916d6d31aa244..2b8581f6ab510100e66fc3e2445ed6460ff65323 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/slab.h>
 #include <net/act_api.h>
 #include <net/netlink.h>
+#include <net/pkt_cls.h>
 
 struct tcf_police_params {
        int                     tcfp_result;
@@ -83,10 +84,12 @@ static const struct nla_policy police_policy[TCA_POLICE_MAX + 1] = {
 static int tcf_police_init(struct net *net, struct nlattr *nla,
                               struct nlattr *est, struct tc_action **a,
                               int ovr, int bind, bool rtnl_held,
+                              struct tcf_proto *tp,
                               struct netlink_ext_ack *extack)
 {
        int ret = 0, tcfp_result = TC_ACT_OK, err, size;
        struct nlattr *tb[TCA_POLICE_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_police *parm;
        struct tcf_police *police;
        struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
@@ -128,6 +131,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        police = to_police(*a);
        if (parm->rate.rate) {
@@ -213,12 +219,14 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
        if (new->peak_present)
                police->tcfp_ptoks = new->tcfp_mtu_ptoks;
        spin_unlock_bh(&police->tcfp_lock);
-       police->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(police->params,
                           new,
                           lockdep_is_held(&police->tcf_lock));
        spin_unlock_bh(&police->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (new)
                kfree_rcu(new, rcu);
 
@@ -229,6 +237,9 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 failure:
        qdisc_put_rtab(P_tab);
        qdisc_put_rtab(R_tab);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
        tcf_idr_release(*a, bind);
        return err;
 }
index 203e399e5c85a293b29d52a84f31bff929827cf4..4060b0955c97db68872a88d6bd05d5143fdc2e7c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/tc_act/tc_sample.h>
 #include <net/tc_act/tc_sample.h>
 #include <net/psample.h>
+#include <net/pkt_cls.h>
 
 #include <linux/if_arp.h>
 
@@ -37,12 +38,13 @@ static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
 
 static int tcf_sample_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a, int ovr,
-                          int bind, bool rtnl_held,
+                          int bind, bool rtnl_held, struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, sample_net_id);
        struct nlattr *tb[TCA_SAMPLE_MAX + 1];
        struct psample_group *psample_group;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_sample *parm;
        u32 psample_group_num;
        struct tcf_sample *s;
@@ -79,18 +81,21 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
        psample_group = psample_group_get(net, psample_group_num);
        if (!psample_group) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        s = to_sample(*a);
 
        spin_lock_bh(&s->tcf_lock);
-       s->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
        s->psample_group_num = psample_group_num;
        RCU_INIT_POINTER(s->psample_group, psample_group);
@@ -100,10 +105,18 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
                s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
        }
        spin_unlock_bh(&s->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_sample_cleanup(struct tc_action *a)
index d54cb608dbafae7ea9a333bd6a117824f2a1caac..23c8ca5615e59b85d22bacda25a22cc02e54d9f6 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/rtnetlink.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_defact.h>
 #include <net/tc_act/tc_defact.h>
@@ -60,14 +61,26 @@ static int alloc_defdata(struct tcf_defact *d, const struct nlattr *defdata)
        return 0;
 }
 
-static void reset_policy(struct tcf_defact *d, const struct nlattr *defdata,
-                        struct tc_defact *p)
+static int reset_policy(struct tc_action *a, const struct nlattr *defdata,
+                       struct tc_defact *p, struct tcf_proto *tp,
+                       struct netlink_ext_ack *extack)
 {
+       struct tcf_chain *goto_ch = NULL;
+       struct tcf_defact *d;
+       int err;
+
+       err = tcf_action_check_ctrlact(p->action, tp, &goto_ch, extack);
+       if (err < 0)
+               return err;
+       d = to_defact(a);
        spin_lock_bh(&d->tcf_lock);
-       d->tcf_action = p->action;
+       goto_ch = tcf_action_set_ctrlact(a, p->action, goto_ch);
        memset(d->tcfd_defdata, 0, SIMP_MAX_DATA);
        nla_strlcpy(d->tcfd_defdata, defdata, SIMP_MAX_DATA);
        spin_unlock_bh(&d->tcf_lock);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+       return 0;
 }
 
 static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
@@ -78,10 +91,11 @@ static const struct nla_policy simple_policy[TCA_DEF_MAX + 1] = {
 static int tcf_simp_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, simp_net_id);
        struct nlattr *tb[TCA_DEF_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_defact *parm;
        struct tcf_defact *d;
        bool exists = false;
@@ -122,27 +136,37 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
                }
 
                d = to_defact(*a);
-               ret = alloc_defdata(d, tb[TCA_DEF_DATA]);
-               if (ret < 0) {
-                       tcf_idr_release(*a, bind);
-                       return ret;
-               }
-               d->tcf_action = parm->action;
+               err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch,
+                                              extack);
+               if (err < 0)
+                       goto release_idr;
+
+               err = alloc_defdata(d, tb[TCA_DEF_DATA]);
+               if (err < 0)
+                       goto put_chain;
+
+               tcf_action_set_ctrlact(*a, parm->action, goto_ch);
                ret = ACT_P_CREATED;
        } else {
-               d = to_defact(*a);
-
                if (!ovr) {
-                       tcf_idr_release(*a, bind);
-                       return -EEXIST;
+                       err = -EEXIST;
+                       goto release_idr;
                }
 
-               reset_policy(d, tb[TCA_DEF_DATA], parm);
+               err = reset_policy(*a, tb[TCA_DEF_DATA], parm, tp, extack);
+               if (err)
+                       goto release_idr;
        }
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
index 65879500b688bca58c451b017928f22994fa9d82..7e1d261a31d2e73460f5d8b2d1d7869f18a1ea24 100644 (file)
@@ -26,6 +26,7 @@
 #include <net/ip.h>
 #include <net/ipv6.h>
 #include <net/dsfield.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_skbedit.h>
 #include <net/tc_act/tc_skbedit.h>
@@ -96,11 +97,13 @@ static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
 static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                            struct nlattr *est, struct tc_action **a,
                            int ovr, int bind, bool rtnl_held,
+                           struct tcf_proto *tp,
                            struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbedit_net_id);
        struct tcf_skbedit_params *params_new;
        struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tc_skbedit *parm;
        struct tcf_skbedit *d;
        u32 flags = 0, *priority = NULL, *mark = NULL, *mask = NULL;
@@ -186,11 +189,14 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                        return -EEXIST;
                }
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
        if (unlikely(!params_new)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        params_new->flags = flags;
@@ -208,16 +214,24 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
                params_new->mask = *mask;
 
        spin_lock_bh(&d->tcf_lock);
-       d->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(d->params, params_new,
                           lockdep_is_held(&d->tcf_lock));
        spin_unlock_bh(&d->tcf_lock);
        if (params_new)
                kfree_rcu(params_new, rcu);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
index 7bac1d78e7a39994ccb9bea33fd3aa0fd7030a7e..1d4c324d0a42bd2cd707a987a5595f3119f66155 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/rtnetlink.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_skbmod.h>
 #include <net/tc_act/tc_skbmod.h>
@@ -82,11 +83,13 @@ static const struct nla_policy skbmod_policy[TCA_SKBMOD_MAX + 1] = {
 static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, skbmod_net_id);
        struct nlattr *tb[TCA_SKBMOD_MAX + 1];
        struct tcf_skbmod_params *p, *p_old;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_skbmod *parm;
        struct tcf_skbmod *d;
        bool exists = false;
@@ -153,21 +156,24 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
                tcf_idr_release(*a, bind);
                return -EEXIST;
        }
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
 
        d = to_skbmod(*a);
 
        p = kzalloc(sizeof(struct tcf_skbmod_params), GFP_KERNEL);
        if (unlikely(!p)) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        p->flags = lflags;
-       d->tcf_action = parm->action;
 
        if (ovr)
                spin_lock_bh(&d->tcf_lock);
        /* Protected by tcf_lock if overwriting existing action. */
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        p_old = rcu_dereference_protected(d->skbmod_p, 1);
 
        if (lflags & SKBMOD_F_DMAC)
@@ -183,10 +189,18 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
 
        if (p_old)
                kfree_rcu(p_old, rcu);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_skbmod_cleanup(struct tc_action *a)
index 7c6591b991d510318f0eba8ec8036f62d8019a1d..d5aaf90a39712982685cbed5f60d16576324f17b 100644 (file)
@@ -17,6 +17,7 @@
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
 #include <net/dst.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_tunnel_key.h>
 #include <net/tc_act/tc_tunnel_key.h>
@@ -210,12 +211,14 @@ static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
 static int tunnel_key_init(struct net *net, struct nlattr *nla,
                           struct nlattr *est, struct tc_action **a,
                           int ovr, int bind, bool rtnl_held,
+                          struct tcf_proto *tp,
                           struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
        struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
        struct tcf_tunnel_key_params *params_new;
        struct metadata_dst *metadata = NULL;
+       struct tcf_chain *goto_ch = NULL;
        struct tc_tunnel_key *parm;
        struct tcf_tunnel_key *t;
        bool exists = false;
@@ -359,6 +362,12 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                goto release_tun_meta;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0) {
+               ret = err;
+               exists = true;
+               goto release_tun_meta;
+       }
        t = to_tunnel_key(*a);
 
        params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
@@ -366,23 +375,29 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
                NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
                ret = -ENOMEM;
                exists = true;
-               goto release_tun_meta;
+               goto put_chain;
        }
        params_new->tcft_action = parm->t_action;
        params_new->tcft_enc_metadata = metadata;
 
        spin_lock_bh(&t->tcf_lock);
-       t->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(t->params, params_new,
                           lockdep_is_held(&t->tcf_lock));
        spin_unlock_bh(&t->tcf_lock);
        tunnel_key_release_params(params_new);
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
 
        return ret;
 
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+
 release_tun_meta:
        if (metadata)
                dst_release(&metadata->dst);
index ac0061599225b6871d846dce30dd83ae37abff25..0f40d0a74423b8d91bf8bb8838eb382846710851 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/if_vlan.h>
 #include <net/netlink.h>
 #include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
 
 #include <linux/tc_act/tc_vlan.h>
 #include <net/tc_act/tc_vlan.h>
@@ -105,10 +106,11 @@ static const struct nla_policy vlan_policy[TCA_VLAN_MAX + 1] = {
 static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                         struct nlattr *est, struct tc_action **a,
                         int ovr, int bind, bool rtnl_held,
-                        struct netlink_ext_ack *extack)
+                        struct tcf_proto *tp, struct netlink_ext_ack *extack)
 {
        struct tc_action_net *tn = net_generic(net, vlan_net_id);
        struct nlattr *tb[TCA_VLAN_MAX + 1];
+       struct tcf_chain *goto_ch = NULL;
        struct tcf_vlan_params *p;
        struct tc_vlan *parm;
        struct tcf_vlan *v;
@@ -200,12 +202,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
                return -EEXIST;
        }
 
+       err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+       if (err < 0)
+               goto release_idr;
+
        v = to_vlan(*a);
 
        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
-               tcf_idr_release(*a, bind);
-               return -ENOMEM;
+               err = -ENOMEM;
+               goto put_chain;
        }
 
        p->tcfv_action = action;
@@ -214,16 +220,24 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
        p->tcfv_push_proto = push_proto;
 
        spin_lock_bh(&v->tcf_lock);
-       v->tcf_action = parm->action;
+       goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
        rcu_swap_protected(v->vlan_p, p, lockdep_is_held(&v->tcf_lock));
        spin_unlock_bh(&v->tcf_lock);
 
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
        if (p)
                kfree_rcu(p, rcu);
 
        if (ret == ACT_P_CREATED)
                tcf_idr_insert(tn, *a);
        return ret;
+put_chain:
+       if (goto_ch)
+               tcf_chain_put_by_act(goto_ch);
+release_idr:
+       tcf_idr_release(*a, bind);
+       return err;
 }
 
 static void tcf_vlan_cleanup(struct tc_action *a)
index dc10525e90e7073563f9a3b220fc092323946a19..99ae30c177c76783dae71bf7955f4d4d0bb3b639 100644 (file)
@@ -367,7 +367,7 @@ static void tcf_chain_destroy(struct tcf_chain *chain, bool free_block)
        struct tcf_block *block = chain->block;
 
        mutex_destroy(&chain->filter_chain_lock);
-       kfree(chain);
+       kfree_rcu(chain, rcu);
        if (free_block)
                tcf_block_destroy(block);
 }
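
Switching tcf_chain_destroy() from kfree() to kfree_rcu() defers the free past an RCU grace period, so lockless readers that still hold a pointer to the chain cannot dereference freed memory; kfree_rcu() needs a struct rcu_head member in the object, named by its second argument. A minimal sketch of the pattern, using a made-up struct for illustration:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Illustrative only; the real struct tcf_chain carries much more state. */
struct demo_chain {
	u32 index;
	struct rcu_head rcu;	/* gives kfree_rcu() somewhere to queue the free */
};

static void demo_chain_destroy(struct demo_chain *chain)
{
	/* Freed only after all current RCU read-side critical sections end. */
	kfree_rcu(chain, rcu);
}
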
index 1d2a12132abcccdea4a237a9b06bb9296ff28f0b..acc9b9da985f81ffd9b485e082cf1781e6731ba2 100644 (file)
@@ -211,6 +211,9 @@ struct cake_sched_data {
        u8              ack_filter;
        u8              atm_mode;
 
+       u32             fwmark_mask;
+       u16             fwmark_shft;
+
        /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
        u16             rate_shft;
        ktime_t         time_next_packet;
@@ -258,8 +261,7 @@ enum {
        CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
        CAKE_FLAG_INGRESS          = BIT(2),
        CAKE_FLAG_WASH             = BIT(3),
-       CAKE_FLAG_SPLIT_GSO        = BIT(4),
-       CAKE_FLAG_FWMARK           = BIT(5)
+       CAKE_FLAG_SPLIT_GSO        = BIT(4)
 };
 
 /* COBALT operates the Codel and BLUE algorithms in parallel, in order to
@@ -1543,7 +1545,7 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
                                             struct sk_buff *skb)
 {
        struct cake_sched_data *q = qdisc_priv(sch);
-       u32 tin;
+       u32 tin, mark;
        u8 dscp;
 
        /* Tin selection: Default to diffserv-based selection, allow overriding
@@ -1551,14 +1553,13 @@ static struct cake_tin_data *cake_select_tin(struct Qdisc *sch,
         */
        dscp = cake_handle_diffserv(skb,
                                    q->rate_flags & CAKE_FLAG_WASH);
+       mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
 
        if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
                tin = 0;
 
-       else if (q->rate_flags & CAKE_FLAG_FWMARK && /* use fw mark */
-                skb->mark &&
-                skb->mark <= q->tin_cnt)
-               tin = q->tin_order[skb->mark - 1];
+       else if (mark && mark <= q->tin_cnt)
+               tin = q->tin_order[mark - 1];
 
        else if (TC_H_MAJ(skb->priority) == sch->handle &&
                 TC_H_MIN(skb->priority) > 0 &&
@@ -2172,6 +2173,7 @@ static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
        [TCA_CAKE_MPU]           = { .type = NLA_U32 },
        [TCA_CAKE_INGRESS]       = { .type = NLA_U32 },
        [TCA_CAKE_ACK_FILTER]    = { .type = NLA_U32 },
+       [TCA_CAKE_FWMARK]        = { .type = NLA_U32 },
 };
 
 static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
@@ -2619,10 +2621,8 @@ static int cake_change(struct Qdisc *sch, struct nlattr *opt,
        }
 
        if (tb[TCA_CAKE_FWMARK]) {
-               if (!!nla_get_u32(tb[TCA_CAKE_FWMARK]))
-                       q->rate_flags |= CAKE_FLAG_FWMARK;
-               else
-                       q->rate_flags &= ~CAKE_FLAG_FWMARK;
+               q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
+               q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
        }
 
        if (q->tins) {
@@ -2784,8 +2784,7 @@ static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
                        !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
                goto nla_put_failure;
 
-       if (nla_put_u32(skb, TCA_CAKE_FWMARK,
-                       !!(q->rate_flags & CAKE_FLAG_FWMARK)))
+       if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
                goto nla_put_failure;
 
        return nla_nest_end(skb, opts);
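
The CAKE change above replaces the boolean CAKE_FLAG_FWMARK with a configurable mask: the tin override becomes (skb->mark & fwmark_mask) >> __ffs(fwmark_mask), so only the masked bits of the firewall mark select a tin and a result of 0 means no override. A small sketch of that arithmetic, with hypothetical names and example values:

#include <linux/bitops.h>
#include <linux/types.h>

/* Sketch of the fwmark-to-tin mapping; demo_* names are illustrative. */
static u32 demo_fwmark_tin(u32 mark, u32 fwmark_mask)
{
	u32 fwmark_shft = fwmark_mask ? __ffs(fwmark_mask) : 0;

	/* e.g. mask 0x00ff0000, mark 0x00030000 -> 3, i.e. tin_order[2] */
	return (mark & fwmark_mask) >> fwmark_shft;
}
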
index 6140471efd4b8cf851d238a8c80c22858346740e..9874e60c9b0d00924042c1b377bc0c777edfc4cb 100644 (file)
@@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       kaddrs = vmemdup_user(addrs, addrs_size);
+       kaddrs = memdup_user(addrs, addrs_size);
        if (unlikely(IS_ERR(kaddrs)))
                return PTR_ERR(kaddrs);
 
@@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
                if (walk_size + sizeof(sa_family_t) > addrs_size) {
-                       kvfree(kaddrs);
+                       kfree(kaddrs);
                        return -EINVAL;
                }
 
@@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
                 * causes the address buffer to overflow return EINVAL.
                 */
                if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
-                       kvfree(kaddrs);
+                       kfree(kaddrs);
                        return -EINVAL;
                }
                addrcnt++;
@@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
        }
 
 out:
-       kvfree(kaddrs);
+       kfree(kaddrs);
 
        return err;
 }
@@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        if (unlikely(addrs_size <= 0))
                return -EINVAL;
 
-       kaddrs = vmemdup_user(addrs, addrs_size);
+       kaddrs = memdup_user(addrs, addrs_size);
        if (unlikely(IS_ERR(kaddrs)))
                return PTR_ERR(kaddrs);
 
@@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
        err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
 
 out_free:
-       kvfree(kaddrs);
+       kfree(kaddrs);
 
        return err;
 }
@@ -2920,6 +2920,9 @@ static int sctp_setsockopt_delayed_ack(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.sack_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.sack_assoc_id == SCTP_FUTURE_ASSOC ||
            params.sack_assoc_id == SCTP_ALL_ASSOC) {
                if (params.sack_delay) {
@@ -3024,6 +3027,9 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               info.sinfo_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.sinfo_assoc_id == SCTP_FUTURE_ASSOC ||
            info.sinfo_assoc_id == SCTP_ALL_ASSOC) {
                sp->default_stream = info.sinfo_stream;
@@ -3081,6 +3087,9 @@ static int sctp_setsockopt_default_sndinfo(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               info.snd_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.snd_assoc_id == SCTP_FUTURE_ASSOC ||
            info.snd_assoc_id == SCTP_ALL_ASSOC) {
                sp->default_stream = info.snd_sid;
@@ -3531,6 +3540,9 @@ static int sctp_setsockopt_context(struct sock *sk, char __user *optval,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->default_rcv_context = params.assoc_value;
@@ -3670,6 +3682,9 @@ static int sctp_setsockopt_maxburst(struct sock *sk,
                return 0;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->max_burst = params.assoc_value;
@@ -3798,6 +3813,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               authkey->sca_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (authkey->sca_assoc_id == SCTP_FUTURE_ASSOC ||
            authkey->sca_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_set_key(ep, asoc, authkey);
@@ -3853,6 +3871,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
        if (asoc)
                return sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_set_active_key(ep, asoc, val.scact_keynumber);
@@ -3904,6 +3925,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
        if (asoc)
                return sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_del_key_id(ep, asoc, val.scact_keynumber);
@@ -3954,6 +3978,9 @@ static int sctp_setsockopt_deactivate_key(struct sock *sk, char __user *optval,
        if (asoc)
                return sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
 
+       if (sctp_style(sk, TCP))
+               val.scact_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (val.scact_assoc_id == SCTP_FUTURE_ASSOC ||
            val.scact_assoc_id == SCTP_ALL_ASSOC) {
                ret = sctp_auth_deact_key_id(ep, asoc, val.scact_keynumber);
@@ -4169,6 +4196,9 @@ static int sctp_setsockopt_default_prinfo(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               info.pr_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (info.pr_assoc_id == SCTP_FUTURE_ASSOC ||
            info.pr_assoc_id == SCTP_ALL_ASSOC) {
                SCTP_PR_SET_POLICY(sp->default_flags, info.pr_policy);
@@ -4251,6 +4281,9 @@ static int sctp_setsockopt_enable_strreset(struct sock *sk,
                goto out;
        }
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                ep->strreset_enable = params.assoc_value;
@@ -4376,6 +4409,9 @@ static int sctp_setsockopt_scheduler(struct sock *sk,
        if (asoc)
                return sctp_sched_set_sched(asoc, params.assoc_value);
 
+       if (sctp_style(sk, TCP))
+               params.assoc_id = SCTP_FUTURE_ASSOC;
+
        if (params.assoc_id == SCTP_FUTURE_ASSOC ||
            params.assoc_id == SCTP_ALL_ASSOC)
                sp->default_ss = params.assoc_value;
@@ -4541,6 +4577,9 @@ static int sctp_setsockopt_event(struct sock *sk, char __user *optval,
        if (asoc)
                return sctp_assoc_ulpevent_type_set(&param, asoc);
 
+       if (sctp_style(sk, TCP))
+               param.se_assoc_id = SCTP_FUTURE_ASSOC;
+
        if (param.se_assoc_id == SCTP_FUTURE_ASSOC ||
            param.se_assoc_id == SCTP_ALL_ASSOC)
                sctp_ulpevent_type_set(&sp->subscribe,
@@ -9169,7 +9208,7 @@ static inline void sctp_copy_descendant(struct sock *sk_to,
 {
        int ancestor_size = sizeof(struct inet_sock) +
                            sizeof(struct sctp_sock) -
-                           offsetof(struct sctp_sock, auto_asconf_list);
+                           offsetof(struct sctp_sock, pd_lobby);
 
        if (sk_from->sk_family == PF_INET6)
                ancestor_size += sizeof(struct ipv6_pinfo);
@@ -9253,7 +9292,6 @@ static int sctp_sock_migrate(struct sock *oldsk, struct sock *newsk,
         * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby.
         * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue.
         */
-       skb_queue_head_init(&newsp->pd_lobby);
        atomic_set(&sctp_sk(newsk)->pd_mode, assoc->ulpq.pd_mode);
 
        if (atomic_read(&sctp_sk(oldsk)->pd_mode)) {
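
The bindx/connectx hunks above also swap vmemdup_user() for memdup_user(); the two differ in how the buffer must be released, which is why every kvfree() in those paths becomes kfree(). A minimal sketch of the pairing, using a hypothetical demo_copy_addrs() caller:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* memdup_user() always kmallocs, so kfree() is the matching release;
 * vmemdup_user() may fall back to vmalloc and would need kvfree(). */
static int demo_copy_addrs(const void __user *addrs, int addrs_size)
{
	void *kaddrs;

	kaddrs = memdup_user(addrs, addrs_size);
	if (IS_ERR(kaddrs))
		return PTR_ERR(kaddrs);

	/* ... walk and validate the copied addresses ... */

	kfree(kaddrs);
	return 0;
}
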
index 3c176a12fe48048613d89add95189bb47cd5e14e..8255f5bda0aa07dbeb78460d5bbf52a9f4510fd3 100644 (file)
@@ -384,6 +384,18 @@ static struct file_system_type sock_fs_type = {
  *     but we take care of internal coherence yet.
  */
 
+/**
+ *     sock_alloc_file - Bind a &socket to a &file
+ *     @sock: socket
+ *     @flags: file status flags
+ *     @dname: protocol name
+ *
+ *     Returns the &file bound with @sock, implicitly storing it
+ *     in sock->file. If @dname is %NULL, an empty name string is used.
+ *     On failure the return is an ERR pointer (see linux/err.h).
+ *     This function uses GFP_KERNEL internally.
+ */
+
 struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
 {
        struct file *file;
@@ -424,6 +436,14 @@ static int sock_map_fd(struct socket *sock, int flags)
        return PTR_ERR(newfile);
 }
 
+/**
+ *     sock_from_file - Return the &socket bound to @file.
+ *     @file: file
+ *     @err: pointer to an error code return
+ *
+ *     On failure returns %NULL and assigns -ENOTSOCK to @err.
+ */
+
 struct socket *sock_from_file(struct file *file, int *err)
 {
        if (file->f_op == &socket_file_ops)
@@ -532,11 +552,11 @@ static const struct inode_operations sockfs_inode_ops = {
 };
 
 /**
- *     sock_alloc      -       allocate a socket
+ *     sock_alloc - allocate a socket
  *
  *     Allocate a new inode and socket object. The two are bound together
  *     and initialised. The socket is then returned. If we are out of inodes
- *     NULL is returned.
+ *     NULL is returned. This function uses GFP_KERNEL internally.
  */
 
 struct socket *sock_alloc(void)
@@ -561,7 +581,7 @@ struct socket *sock_alloc(void)
 EXPORT_SYMBOL(sock_alloc);
 
 /**
- *     sock_release    -       close a socket
+ *     sock_release - close a socket
  *     @sock: socket to close
  *
  *     The socket is released from the protocol stack if it has a release
@@ -617,6 +637,15 @@ void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags)
 }
 EXPORT_SYMBOL(__sock_tx_timestamp);
 
+/**
+ *     sock_sendmsg - send a message through @sock
+ *     @sock: socket
+ *     @msg: message to send
+ *
+ *     Sends @msg through @sock, passing through LSM.
+ *     Returns the number of bytes sent, or an error code.
+ */
+
 static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg)
 {
        int ret = sock->ops->sendmsg(sock, msg, msg_data_left(msg));
@@ -633,6 +662,18 @@ int sock_sendmsg(struct socket *sock, struct msghdr *msg)
 }
 EXPORT_SYMBOL(sock_sendmsg);
 
+/**
+ *     kernel_sendmsg - send a message through @sock (kernel-space)
+ *     @sock: socket
+ *     @msg: message header
+ *     @vec: kernel vec
+ *     @num: vec array length
+ *     @size: total message data size
+ *
+ *     Builds the message data with @vec and sends it through @sock.
+ *     Returns the number of bytes sent, or an error code.
+ */
+
 int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size)
 {
@@ -641,6 +682,19 @@ int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
 }
 EXPORT_SYMBOL(kernel_sendmsg);
 
+/**
+ *     kernel_sendmsg_locked - send a message through @sock (kernel-space)
+ *     @sk: sock
+ *     @msg: message header
+ *     @vec: output s/g array
+ *     @num: output s/g array length
+ *     @size: total message data size
+ *
+ *     Builds the message data with @vec and sends it through @sock.
+ *     Returns the number of bytes sent, or an error code.
+ *     Caller must hold @sk.
+ */
+
 int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg,
                          struct kvec *vec, size_t num, size_t size)
 {
@@ -811,6 +865,16 @@ void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 }
 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
 
+/**
+ *     sock_recvmsg - receive a message from @sock
+ *     @sock: socket
+ *     @msg: message to receive
+ *     @flags: message flags
+ *
+ *     Receives @msg from @sock, passing through LSM. Returns the total number
+ *     of bytes received, or an error.
+ */
+
 static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
                                     int flags)
 {
@@ -826,20 +890,21 @@ int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags)
 EXPORT_SYMBOL(sock_recvmsg);
 
 /**
- * kernel_recvmsg - Receive a message from a socket (kernel space)
- * @sock:       The socket to receive the message from
- * @msg:        Received message
- * @vec:        Input s/g array for message data
- * @num:        Size of input s/g array
- * @size:       Number of bytes to read
- * @flags:      Message flags (MSG_DONTWAIT, etc...)
+ *     kernel_recvmsg - Receive a message from a socket (kernel space)
+ *     @sock: The socket to receive the message from
+ *     @msg: Received message
+ *     @vec: Input s/g array for message data
+ *     @num: Size of input s/g array
+ *     @size: Number of bytes to read
+ *     @flags: Message flags (MSG_DONTWAIT, etc...)
  *
- * On return the msg structure contains the scatter/gather array passed in the
- * vec argument. The array is modified so that it consists of the unfilled
- * portion of the original array.
+ *     On return the msg structure contains the scatter/gather array passed in the
+ *     vec argument. The array is modified so that it consists of the unfilled
+ *     portion of the original array.
  *
- * The returned value is the total number of bytes received, or an error.
+ *     The returned value is the total number of bytes received, or an error.
  */
+
 int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
                   struct kvec *vec, size_t num, size_t size, int flags)
 {
@@ -1005,6 +1070,13 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
  *     what to do with it - that's up to the protocol still.
  */
 
+/**
+ *     get_net_ns - increment the refcount of the network namespace
+ *     @ns: common namespace (net)
+ *
+ *     Returns the net's common namespace.
+ */
+
 struct ns_common *get_net_ns(struct ns_common *ns)
 {
        return &get_net(container_of(ns, struct net, ns))->ns;
@@ -1099,6 +1171,19 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        return err;
 }
 
+/**
+ *     sock_create_lite - creates a socket
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     Creates a new socket and assigns it to @res, passing through LSM.
+ *     The new socket initialization is not complete, see kernel_accept().
+ *     Returns 0 or an error. On failure @res is set to %NULL.
+ *     This function internally uses GFP_KERNEL.
+ */
+
 int sock_create_lite(int family, int type, int protocol, struct socket **res)
 {
        int err;
@@ -1224,6 +1309,21 @@ call_kill:
 }
 EXPORT_SYMBOL(sock_wake_async);
 
+/**
+ *     __sock_create - creates a socket
+ *     @net: net namespace
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *     @kern: boolean for kernel space sockets
+ *
+ *     Creates a new socket and assigns it to @res, passing through LSM.
+ *     Returns 0 or an error. On failure @res is set to %NULL. @kern must
+ *     be set to true if the socket resides in kernel space.
+ *     This function internally uses GFP_KERNEL.
+ */
+
 int __sock_create(struct net *net, int family, int type, int protocol,
                         struct socket **res, int kern)
 {
@@ -1333,12 +1433,35 @@ out_release:
 }
 EXPORT_SYMBOL(__sock_create);
 
+/**
+ *     sock_create - creates a socket
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     A wrapper around __sock_create().
+ *     Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
 int sock_create(int family, int type, int protocol, struct socket **res)
 {
        return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
 }
 EXPORT_SYMBOL(sock_create);
 
+/**
+ *     sock_create_kern - creates a socket (kernel space)
+ *     @net: net namespace
+ *     @family: protocol family (AF_INET, ...)
+ *     @type: communication type (SOCK_STREAM, ...)
+ *     @protocol: protocol (0, ...)
+ *     @res: new socket
+ *
+ *     A wrapper around __sock_create().
+ *     Returns 0 or an error. This function internally uses GFP_KERNEL.
+ */
+
 int sock_create_kern(struct net *net, int family, int type, int protocol, struct socket **res)
 {
        return __sock_create(net, family, type, protocol, res, 1);
@@ -3322,18 +3445,46 @@ static long compat_sock_ioctl(struct file *file, unsigned int cmd,
 }
 #endif
 
+/**
+ *     kernel_bind - bind an address to a socket (kernel space)
+ *     @sock: socket
+ *     @addr: address
+ *     @addrlen: length of address
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
 {
        return sock->ops->bind(sock, addr, addrlen);
 }
 EXPORT_SYMBOL(kernel_bind);
 
+/**
+ *     kernel_listen - move socket to listening state (kernel space)
+ *     @sock: socket
+ *     @backlog: pending connections queue size
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_listen(struct socket *sock, int backlog)
 {
        return sock->ops->listen(sock, backlog);
 }
 EXPORT_SYMBOL(kernel_listen);
 
+/**
+ *     kernel_accept - accept a connection (kernel space)
+ *     @sock: listening socket
+ *     @newsock: new connected socket
+ *     @flags: flags
+ *
+ *     @flags must be SOCK_CLOEXEC, SOCK_NONBLOCK or 0.
+ *     If it fails, @newsock is guaranteed to be %NULL.
+ *     Returns 0 or an error.
+ */
+
 int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
 {
        struct sock *sk = sock->sk;
@@ -3359,6 +3510,19 @@ done:
 }
 EXPORT_SYMBOL(kernel_accept);
 
+/**
+ *     kernel_connect - connect a socket (kernel space)
+ *     @sock: socket
+ *     @addr: address
+ *     @addrlen: address length
+ *     @flags: flags (O_NONBLOCK, ...)
+ *
+ *     For datagram sockets, @addr is the address to which datagrams are sent
+ *     by default, and the only address from which datagrams are received.
+ *     For stream sockets, attempts to connect to @addr.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
                   int flags)
 {
@@ -3366,18 +3530,48 @@ int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
 }
 EXPORT_SYMBOL(kernel_connect);
 
+/**
+ *     kernel_getsockname - get the address to which the socket is bound (kernel space)
+ *     @sock: socket
+ *     @addr: address holder
+ *
+ *     Fills the @addr pointer with the address to which the socket is bound.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_getsockname(struct socket *sock, struct sockaddr *addr)
 {
        return sock->ops->getname(sock, addr, 0);
 }
 EXPORT_SYMBOL(kernel_getsockname);
 
+/**
+ *     kernel_getpeername - get the address to which the socket is connected (kernel space)
+ *     @sock: socket
+ *     @addr: address holder
+ *
+ *     Fills the @addr pointer with the address to which the socket is connected.
+ *     Returns 0 or an error code.
+ */
+
 int kernel_getpeername(struct socket *sock, struct sockaddr *addr)
 {
        return sock->ops->getname(sock, addr, 1);
 }
 EXPORT_SYMBOL(kernel_getpeername);
 
+/**
+ *     kernel_getsockopt - get a socket option (kernel space)
+ *     @sock: socket
+ *     @level: API level (SOL_SOCKET, ...)
+ *     @optname: option tag
+ *     @optval: option value
+ *     @optlen: option length
+ *
+ *     Assigns the option length to @optlen.
+ *     Returns 0 or an error.
+ */
+
 int kernel_getsockopt(struct socket *sock, int level, int optname,
                        char *optval, int *optlen)
 {
@@ -3400,6 +3594,17 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(kernel_getsockopt);
 
+/**
+ *     kernel_setsockopt - set a socket option (kernel space)
+ *     @sock: socket
+ *     @level: API level (SOL_SOCKET, ...)
+ *     @optname: option tag
+ *     @optval: option value
+ *     @optlen: option length
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_setsockopt(struct socket *sock, int level, int optname,
                        char *optval, unsigned int optlen)
 {
@@ -3420,6 +3625,17 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
 }
 EXPORT_SYMBOL(kernel_setsockopt);
 
+/**
+ *     kernel_sendpage - send a &page through a socket (kernel space)
+ *     @sock: socket
+ *     @page: page
+ *     @offset: page offset
+ *     @size: total size in bytes
+ *     @flags: flags (MSG_DONTWAIT, ...)
+ *
+ *     Returns the total amount sent in bytes or an error.
+ */
+
 int kernel_sendpage(struct socket *sock, struct page *page, int offset,
                    size_t size, int flags)
 {
@@ -3430,6 +3646,18 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
 }
 EXPORT_SYMBOL(kernel_sendpage);
 
+/**
+ *     kernel_sendpage_locked - send a &page through the locked sock (kernel space)
+ *     @sk: sock
+ *     @page: page
+ *     @offset: page offset
+ *     @size: total size in bytes
+ *     @flags: flags (MSG_DONTWAIT, ...)
+ *
+ *     Returns the total amount sent in bytes or an error.
+ *     Caller must hold @sk.
+ */
+
 int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
                           size_t size, int flags)
 {
@@ -3443,17 +3671,30 @@ int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset,
 }
 EXPORT_SYMBOL(kernel_sendpage_locked);
 
+/**
+ *     kernel_sock_shutdown - shut down part of a full-duplex connection (kernel space)
+ *     @sock: socket
+ *     @how: connection part to shut down (SHUT_RD, SHUT_WR or SHUT_RDWR)
+ *
+ *     Returns 0 or an error.
+ */
+
 int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
 {
        return sock->ops->shutdown(sock, how);
 }
 EXPORT_SYMBOL(kernel_sock_shutdown);
 
-/* This routine returns the IP overhead imposed by a socket i.e.
- * the length of the underlying IP header, depending on whether
- * this is an IPv4 or IPv6 socket and the length from IP options turned
- * on at the socket. Assumes that the caller has a lock on the socket.
+/**
+ *     kernel_sock_ip_overhead - returns the IP overhead imposed by a socket
+ *     @sk: socket
+ *
+ *     This routine returns the IP overhead imposed by a socket i.e.
+ *     the length of the underlying IP header, depending on whether
+ *     this is an IPv4 or IPv6 socket and the length from IP options turned
+ *     on at the socket. Assumes that the caller has a lock on the socket.
  */
+
 u32 kernel_sock_ip_overhead(struct sock *sk)
 {
        struct inet_sock *inet;
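
The kerneldoc added above documents the kernel-space socket helpers; a minimal sketch of how they fit together follows. It creates a TCP listener in the init namespace, accepts one peer and sends a short greeting; the port number and the trimmed error handling are illustrative assumptions, not part of the patch.

#include <linux/in.h>
#include <linux/net.h>
#include <linux/uio.h>
#include <net/net_namespace.h>
#include <net/sock.h>

static int demo_kernel_listener(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(12345),		/* arbitrary demo port */
		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
	};
	static char greeting[] = "hello\n";
	struct kvec vec = { .iov_base = greeting, .iov_len = sizeof(greeting) - 1 };
	struct socket *sock = NULL, *peer = NULL;
	struct msghdr msg = { };
	int err;

	err = sock_create_kern(&init_net, AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (err)
		return err;

	err = kernel_bind(sock, (struct sockaddr *)&addr, sizeof(addr));
	if (!err)
		err = kernel_listen(sock, 5);
	if (!err)
		err = kernel_accept(sock, &peer, 0);	/* blocks until a peer connects */
	if (!err)
		err = kernel_sendmsg(peer, &msg, &vec, 1, vec.iov_len);

	if (peer)
		sock_release(peer);
	sock_release(sock);
	return err < 0 ? err : 0;
}
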
index da1a676860cad3c8a2a95acf11f0e908fe2bc255..860dcfb95ee472fed5d74e6015af2acce178c0a7 100644 (file)
@@ -550,6 +550,8 @@ EXPORT_SYMBOL_GPL(strp_check_rcv);
 static int __init strp_mod_init(void)
 {
        strp_wq = create_singlethread_workqueue("kstrp");
+       if (unlikely(!strp_wq))
+               return -ENOMEM;
 
        return 0;
 }
index 228970e6e52ba8b407be724d055976dd67530f81..187d10443a1584e196245afc9837add06daa1c86 100644 (file)
@@ -2311,6 +2311,15 @@ out_exit:
        rpc_exit(task, status);
 }
 
+static bool
+rpc_check_connected(const struct rpc_rqst *req)
+{
+       /* No allocated request or transport? return true */
+       if (!req || !req->rq_xprt)
+               return true;
+       return xprt_connected(req->rq_xprt);
+}
+
 static void
 rpc_check_timeout(struct rpc_task *task)
 {
@@ -2322,10 +2331,11 @@ rpc_check_timeout(struct rpc_task *task)
        dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
        task->tk_timeouts++;
 
-       if (RPC_IS_SOFTCONN(task)) {
+       if (RPC_IS_SOFTCONN(task) && !rpc_check_connected(task->tk_rqstp)) {
                rpc_exit(task, -ETIMEDOUT);
                return;
        }
+
        if (RPC_IS_SOFT(task)) {
                if (clnt->cl_chatty) {
                        printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
index 9359539907bafb7ca1c13ad51485756ac89ce5d0..732d4b57411a2562ad8dc4ee2633c8441f204ba0 100644 (file)
@@ -495,8 +495,8 @@ xs_read_stream_request(struct sock_xprt *transport, struct msghdr *msg,
                int flags, struct rpc_rqst *req)
 {
        struct xdr_buf *buf = &req->rq_private_buf;
-       size_t want, read;
-       ssize_t ret;
+       size_t want, uninitialized_var(read);
+       ssize_t uninitialized_var(ret);
 
        xs_read_header(transport, buf);
 
index 06fee142f09fbea05a8b27bb240a4f3d3480b5b2..63f39201e41e699104d838f206d43f7f24806d3a 100644 (file)
@@ -919,6 +919,9 @@ int tipc_group_fill_sock_diag(struct tipc_group *grp, struct sk_buff *skb)
 {
        struct nlattr *group = nla_nest_start(skb, TIPC_NLA_SOCK_GROUP);
 
+       if (!group)
+               return -EMSGSIZE;
+
        if (nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_ID,
                        grp->type) ||
            nla_put_u32(skb, TIPC_NLA_SOCK_GROUP_INSTANCE,
index f076edb74338247f0bad99cfaa5d23e5b14730ab..7ce1e86b024f09cb7345840d4a4d8e3949c5107d 100644 (file)
@@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
 
 void tipc_net_stop(struct net *net)
 {
-       u32 self = tipc_own_addr(net);
-
-       if (!self)
+       if (!tipc_own_id(net))
                return;
 
-       tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
        rtnl_lock();
        tipc_bearer_stop(net);
        tipc_node_stop(net);
index 2dc4919ab23cace02749ddb9b4838c2b64c09152..dd3b6dc17662fc42eb0b567501c6b9e8bee67031 100644 (file)
@@ -817,10 +817,10 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
 static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
 {
        struct tipc_link_entry *le = &n->links[bearer_id];
+       struct tipc_media_addr *maddr = NULL;
        struct tipc_link *l = le->link;
-       struct tipc_media_addr *maddr;
-       struct sk_buff_head xmitq;
        int old_bearer_id = bearer_id;
+       struct sk_buff_head xmitq;
 
        if (!l)
                return;
@@ -844,7 +844,8 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
        tipc_node_write_unlock(n);
        if (delete)
                tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
-       tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
+       if (!skb_queue_empty(&xmitq))
+               tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
        tipc_sk_rcv(n->net, &le->inputq);
 }
 
index 3274ef625dba1b3417405d8537b4b30e919d44d1..b542f14ed444bfcedac61ef2d1eda45d1af1add2 100644 (file)
@@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
        return 0;
 }
 
+static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
+{
+       if (addr->family != AF_TIPC)
+               return false;
+       if (addr->addrtype == TIPC_SERVICE_RANGE)
+               return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
+       return (addr->addrtype == TIPC_SERVICE_ADDR ||
+               addr->addrtype == TIPC_SOCKET_ADDR);
+}
+
 /**
  * tipc_connect - establish a connection to another TIPC port
  * @sock: socket structure
@@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
                if (!tipc_sk_type_connectionless(sk))
                        res = -EINVAL;
                goto exit;
-       } else if (dst->family != AF_TIPC) {
-               res = -EINVAL;
        }
-       if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
+       if (!tipc_sockaddr_is_sane(dst)) {
                res = -EINVAL;
-       if (res)
                goto exit;
-
+       }
        /* DGRAM/RDM connect(), just save the destaddr */
        if (tipc_sk_type_connectionless(sk)) {
                memcpy(&tsk->peer, dest, destlen);
                goto exit;
+       } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
+               res = -EINVAL;
+               goto exit;
        }
 
        previous = sk->sk_state;
@@ -3255,6 +3265,8 @@ static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
        peer_port = tsk_peer_port(tsk);
 
        nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
+       if (!nest)
+               return -EMSGSIZE;
 
        if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
                goto msg_full;
index 4a708a4e8583b5db2022b80e80529e25e787e7bb..b45932d780040a35c099a8ead80ccea40e0910b9 100644 (file)
@@ -363,6 +363,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
        struct tipc_subscription *sub;
 
        if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
+               s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
                tipc_conn_delete_sub(con, s);
                return 0;
        }
index 77520eacee8f18da45781ea70cc82ee16e3de94f..989e52386c358a34a566660933002827ba165068 100644 (file)
@@ -193,9 +193,6 @@ static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
 
 static void xdp_umem_release(struct xdp_umem *umem)
 {
-       struct task_struct *task;
-       struct mm_struct *mm;
-
        xdp_umem_clear_dev(umem);
 
        ida_simple_remove(&umem_ida, umem->id);
@@ -214,21 +211,10 @@ static void xdp_umem_release(struct xdp_umem *umem)
 
        xdp_umem_unpin_pages(umem);
 
-       task = get_pid_task(umem->pid, PIDTYPE_PID);
-       put_pid(umem->pid);
-       if (!task)
-               goto out;
-       mm = get_task_mm(task);
-       put_task_struct(task);
-       if (!mm)
-               goto out;
-
-       mmput(mm);
        kfree(umem->pages);
        umem->pages = NULL;
 
        xdp_umem_unaccount_pages(umem);
-out:
        kfree(umem);
 }
 
@@ -357,7 +343,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
        if (size_chk < 0)
                return -EINVAL;
 
-       umem->pid = get_task_pid(current, PIDTYPE_PID);
        umem->address = (unsigned long)addr;
        umem->chunk_mask = ~((u64)chunk_size - 1);
        umem->size = size;
@@ -373,7 +358,7 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
        err = xdp_umem_account_pages(umem);
        if (err)
-               goto out;
+               return err;
 
        err = xdp_umem_pin_pages(umem);
        if (err)
@@ -392,8 +377,6 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
 
 out_account:
        xdp_umem_unaccount_pages(umem);
-out:
-       put_pid(umem->pid);
        return err;
 }
 
index 2554a15ecf2b8796c41e593d97c8b55ee97a4620..76ca30cc4791912fde4d7f36e4a90549e653cfbb 100644 (file)
@@ -199,11 +199,8 @@ sub_cmd_record_mcount = perl $(srctree)/scripts/recordmcount.pl "$(ARCH)" \
        "$(if $(part-of-module),1,0)" "$(@)";
 recordmcount_source := $(srctree)/scripts/recordmcount.pl
 endif # BUILD_C_RECORDMCOUNT
-cmd_record_mcount =                                            \
-       if [ "$(findstring $(CC_FLAGS_FTRACE),$(_c_flags))" =   \
-            "$(CC_FLAGS_FTRACE)" ]; then                       \
-               $(sub_cmd_record_mcount)                        \
-       fi
+cmd_record_mcount = $(if $(findstring $(strip $(CC_FLAGS_FTRACE)),$(_c_flags)),        \
+       $(sub_cmd_record_mcount))
 endif # CC_USING_RECORD_MCOUNT
 endif # CONFIG_FTRACE_MCOUNT_RECORD
 
index 5b756278df13e8aa7f5daf92914858afbfd24482..a09333fd7cef81053bf204dd03a4db07bbb833ef 100755 (executable)
@@ -5977,7 +5977,7 @@ sub process {
                                while ($fmt =~ /(\%[\*\d\.]*p(\w))/g) {
                                        $specifier = $1;
                                        $extension = $2;
-                                       if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOx]/) {
+                                       if ($extension !~ /[SsBKRraEhMmIiUDdgVCbGNOxt]/) {
                                                $bad_specifier = $specifier;
                                                last;
                                        }
index 7395697e7f19a5f524d883b7079197434c2983ae..c9f071b0a0ab70b647bec3633571059934ece1bc 100644 (file)
@@ -32,6 +32,7 @@ if (id == NULL || ...) { ... return ...; }
 (    id
 |    (T2)dev_get_drvdata(&id->dev)
 |    (T3)platform_get_drvdata(id)
+|    &id->dev
 );
 | return@p2 ...;
 )
index 481cf301ccfc3abf2b68c8dcc8b59612ed2e9841..08470362199c7389009982ec41fed3b9860b89cc 100644 (file)
@@ -1,4 +1,4 @@
-/// Use ARRAY_SIZE instead of dividing sizeof array with sizeof an element
+/// Correct the size argument to alloc functions
 ///
 //# This makes an effort to find cases where the argument to sizeof is wrong
 //# in memory allocation functions by checking the type of the allocated memory
index 611945611bf8352d4831a51c411c2d3d5d7afc59..1dcfb288ee63630e7e73be6fe28f1fd1a3bc5857 100644 (file)
@@ -113,7 +113,8 @@ do_resize:
                        case KEY_DOWN:
                                break;
                        case KEY_BACKSPACE:
-                       case 127:
+                       case 8:   /* ^H */
+                       case 127: /* ^? */
                                if (pos) {
                                        wattrset(dialog, dlg.inputbox.atr);
                                        if (input_x == 0) {
index a4670f4e825a8c779cf4894587b6e7e31f556b02..ac92c0ded6c5c627e974679ef967d4bc37b25a53 100644 (file)
@@ -1048,7 +1048,7 @@ static int do_match(int key, struct match_state *state, int *ans)
                state->match_direction = FIND_NEXT_MATCH_UP;
                *ans = get_mext_match(state->pattern,
                                state->match_direction);
-       } else if (key == KEY_BACKSPACE || key == 127) {
+       } else if (key == KEY_BACKSPACE || key == 8 || key == 127) {
                state->pattern[strlen(state->pattern)-1] = '\0';
                adj_match_dir(&state->match_direction);
        } else
index 7be620a1fcdb8191639aaeaca7b5c6ae421d9769..77f525a8617c27788cc30f9a65c41041050806ed 100644 (file)
@@ -439,7 +439,8 @@ int dialog_inputbox(WINDOW *main_window,
                case KEY_F(F_EXIT):
                case KEY_F(F_BACK):
                        break;
-               case 127:
+               case 8:   /* ^H */
+               case 127: /* ^? */
                case KEY_BACKSPACE:
                        if (cursor_position > 0) {
                                memmove(&result[cursor_position-1],
index 0b0d1080b1c5ef4903a3b87d8fbfbc11b165e739..f277e116e0ebf64e350c636443b2f7c663b599ea 100644 (file)
@@ -639,7 +639,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
                               info->sechdrs[sym->st_shndx].sh_offset -
                               (info->hdr->e_type != ET_REL ?
                                info->sechdrs[sym->st_shndx].sh_addr : 0);
-                       crc = *crcp;
+                       crc = TO_NATIVE(*crcp);
                }
                sym_update_crc(symname + strlen("__crc_"), mod, crc,
                                export);
index 1d6463fb1450c03b8739b102b48b64e05aaa533e..353cfef71d4e9b89f0a71e46748b50dc5ce00c09 100644 (file)
@@ -239,8 +239,46 @@ source "security/safesetid/Kconfig"
 
 source "security/integrity/Kconfig"
 
+choice
+       prompt "First legacy 'major LSM' to be initialized"
+       default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
+       default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
+       default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+       default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+       default DEFAULT_SECURITY_DAC
+
+       help
+         This choice is there only for converting CONFIG_DEFAULT_SECURITY
+         in old kernel configs to CONFIG_LSM in new kernel configs. Don't
+         change this choice unless you are creating a fresh kernel config,
+         for this choice will be ignored after CONFIG_LSM has been set.
+
+         Selects the legacy "major security module" that will be
+         initialized first. Overridden by non-default CONFIG_LSM.
+
+       config DEFAULT_SECURITY_SELINUX
+               bool "SELinux" if SECURITY_SELINUX=y
+
+       config DEFAULT_SECURITY_SMACK
+               bool "Simplified Mandatory Access Control" if SECURITY_SMACK=y
+
+       config DEFAULT_SECURITY_TOMOYO
+               bool "TOMOYO" if SECURITY_TOMOYO=y
+
+       config DEFAULT_SECURITY_APPARMOR
+               bool "AppArmor" if SECURITY_APPARMOR=y
+
+       config DEFAULT_SECURITY_DAC
+               bool "Unix Discretionary Access Controls"
+
+endchoice
+
 config LSM
        string "Ordered list of enabled LSMs"
+       default "yama,loadpin,safesetid,integrity,smack,selinux,tomoyo,apparmor" if DEFAULT_SECURITY_SMACK
+       default "yama,loadpin,safesetid,integrity,apparmor,selinux,smack,tomoyo" if DEFAULT_SECURITY_APPARMOR
+       default "yama,loadpin,safesetid,integrity,tomoyo" if DEFAULT_SECURITY_TOMOYO
+       default "yama,loadpin,safesetid,integrity" if DEFAULT_SECURITY_DAC
        default "yama,loadpin,safesetid,integrity,selinux,smack,tomoyo,apparmor"
        help
          A comma-separated list of LSMs, in initialization order.
index 57cc60722dd3855021c56a3e46d900e1f0ad0efe..efac68556b4571e0ebef345935cdba035a13a285 100644 (file)
@@ -206,7 +206,7 @@ static void yama_ptracer_del(struct task_struct *tracer,
  * yama_task_free - check for task_pid to remove from exception list
  * @task: task being removed
  */
-void yama_task_free(struct task_struct *task)
+static void yama_task_free(struct task_struct *task)
 {
        yama_ptracer_del(task, task);
 }
@@ -222,7 +222,7 @@ void yama_task_free(struct task_struct *task)
  * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
  * does not handle the given option.
  */
-int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
                           unsigned long arg4, unsigned long arg5)
 {
        int rc = -ENOSYS;
@@ -401,7 +401,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
  *
  * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-int yama_ptrace_traceme(struct task_struct *parent)
+static int yama_ptrace_traceme(struct task_struct *parent)
 {
        int rc = 0;
 
@@ -452,7 +452,7 @@ static int yama_dointvec_minmax(struct ctl_table *table, int write,
 static int zero;
 static int max_scope = YAMA_SCOPE_NO_ATTACH;
 
-struct ctl_path yama_sysctl_path[] = {
+static struct ctl_path yama_sysctl_path[] = {
        { .procname = "kernel", },
        { .procname = "yama", },
        { }
index d5b0d7ba83c4204db42df492a5e35f54a67c470c..f6ae68017608d83cedc394cd5c11891439e8cbec 100644 (file)
@@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
                         params_channels(params) / 8;
 
+       err = snd_pcm_oss_period_size(substream, params, sparams);
+       if (err < 0)
+               goto failure;
+
+       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
+       if (err < 0)
+               goto failure;
+
+       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
+                                    runtime->oss.periods, NULL);
+       if (err < 0)
+               goto failure;
+
+       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
+
+       err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
+       if (err < 0) {
+               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
+               goto failure;
+       }
+
 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
        snd_pcm_oss_plugin_clear(substream);
        if (!direct) {
@@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
        }
 #endif
 
-       err = snd_pcm_oss_period_size(substream, params, sparams);
-       if (err < 0)
-               goto failure;
-
-       n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
-       if (err < 0)
-               goto failure;
-
-       err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
-                                    runtime->oss.periods, NULL);
-       if (err < 0)
-               goto failure;
-
-       snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
-
-       if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
-               pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
-               goto failure;
-       }
-
        if (runtime->oss.trigger) {
                sw_params->start_threshold = 1;
        } else {
index f731f904e8ccb4e9671523e3b68e7825c779d8d8..1d8452912b14af7b211acc8796d1526d936007a2 100644 (file)
@@ -1445,8 +1445,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
-       if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
+       switch (runtime->status->state) {
+       case SNDRV_PCM_STATE_SUSPENDED:
                return -EBUSY;
+       /* unresumable PCM state; return -EBUSY for skipping suspend */
+       case SNDRV_PCM_STATE_OPEN:
+       case SNDRV_PCM_STATE_SETUP:
+       case SNDRV_PCM_STATE_DISCONNECTED:
+               return -EBUSY;
+       }
        runtime->trigger_master = substream;
        return 0;
 }
index ee601d7f092694aecd7e853845b4e3f73cd0f261..c0690d1ecd55c1ce33c9bc155abd82d2d0ec9edd 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/mm.h>
+#include <linux/nospec.h>
 #include <sound/rawmidi.h>
 #include <sound/info.h>
 #include <sound/control.h>
@@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
                return -ENXIO;
        if (info->stream < 0 || info->stream > 1)
                return -EINVAL;
+       info->stream = array_index_nospec(info->stream, 2);
        pstr = &rmidi->streams[info->stream];
        if (pstr->substream_count == 0)
                return -ENOENT;
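
The rawmidi fix above clamps info->stream with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively index past the two-element streams[] array (a Spectre-v1 style gadget). A small sketch of the idiom, with hypothetical names:

#include <linux/errno.h>
#include <linux/nospec.h>

static int demo_pick_stream(int stream, void *streams[], int nstreams,
			    void **out)
{
	if (stream < 0 || stream >= nstreams)
		return -EINVAL;

	/* Force the index into [0, nstreams) even under speculation. */
	stream = array_index_nospec(stream, nstreams);
	*out = streams[stream];
	return 0;
}
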
index 278ebb9931225998dd07f0606eeabe289d71aff5..c939459172353dee5ee651ee4694f43f2aa9be7d 100644 (file)
@@ -617,13 +617,14 @@ int
 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
 {
        struct seq_oss_synth *rec;
+       struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
 
-       if (dev < 0 || dev >= dp->max_synthdev)
+       if (!info)
                return -ENXIO;
 
-       if (dp->synths[dev].is_midi) {
+       if (info->is_midi) {
                struct midi_info minf;
-               snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
+               snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
                inf->synth_type = SYNTH_TYPE_MIDI;
                inf->synth_subtype = 0;
                inf->nr_voices = 16;
index 29882bda763289374069ec4777e62b781b416673..e1ebc6d5f38226b10f689b2bc04fd0504331ced2 100644 (file)
@@ -1005,7 +1005,6 @@ struct ca0132_spec {
        unsigned int scp_resp_header;
        unsigned int scp_resp_data[4];
        unsigned int scp_resp_count;
-       bool alt_firmware_present;
        bool startup_check_entered;
        bool dsp_reload;
 
@@ -7518,7 +7517,7 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        bool dsp_loaded = false;
        struct ca0132_spec *spec = codec->spec;
        const struct dsp_image_seg *dsp_os_image;
-       const struct firmware *fw_entry;
+       const struct firmware *fw_entry = NULL;
        /*
         * Alternate firmwares for different variants. The Recon3Di apparently
         * can use the default firmware, but I'll leave the option in case
@@ -7529,33 +7528,26 @@ static bool ca0132_download_dsp_images(struct hda_codec *codec)
        case QUIRK_R3D:
        case QUIRK_AE5:
                if (request_firmware(&fw_entry, DESKTOP_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Desktop firmware not found.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Desktop firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        case QUIRK_R3DI:
                if (request_firmware(&fw_entry, R3DI_EFX_FILE,
-                                       codec->card->dev) != 0) {
+                                       codec->card->dev) != 0)
                        codec_dbg(codec, "Recon3Di alt firmware not detected.");
-                       spec->alt_firmware_present = false;
-               } else {
+               else
                        codec_dbg(codec, "Recon3Di firmware selected.");
-                       spec->alt_firmware_present = true;
-               }
                break;
        default:
-               spec->alt_firmware_present = false;
                break;
        }
        /*
         * Use default ctefx.bin if no alt firmware is detected, or if none
         * exists for your particular codec.
         */
-       if (!spec->alt_firmware_present) {
+       if (!fw_entry) {
                codec_dbg(codec, "Default firmware selected.");
                if (request_firmware(&fw_entry, EFX_FILE,
                                        codec->card->dev) != 0)
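
The ca0132 change drops the alt_firmware_present bookkeeping: fw_entry now starts out NULL, a failed request_firmware() for the variant-specific image leaves it NULL, and a single NULL check selects the default ctefx.bin. Condensed sketch of the resulting control flow (not the full function, which first switches on the board quirk):

	const struct firmware *fw_entry = NULL;

	if (request_firmware(&fw_entry, DESKTOP_EFX_FILE, codec->card->dev) != 0)
		codec_dbg(codec, "Desktop firmware not found.");

	if (!fw_entry) {
		codec_dbg(codec, "Default firmware selected.");
		if (request_firmware(&fw_entry, EFX_FILE, codec->card->dev) != 0)
			return false;	/* no usable DSP image at all */
	}
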
index 191830d4fa4009c4ddd8856a2c95f7f9103375e7..a3fb3d4c573090a156bf32534b1eed033f3ade16 100644 (file)
@@ -5688,6 +5688,8 @@ enum {
        ALC225_FIXUP_WYSE_AUTO_MUTE,
        ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
        ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
+       ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+       ALC299_FIXUP_PREDATOR_SPK,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -6696,6 +6698,22 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
        },
+       [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
+       },
+       [ALC299_FIXUP_PREDATOR_SPK] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x21, 0x90170150 }, /* use as internal speaker */
+                       { }
+               }
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6712,9 +6730,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
+       SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
        SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
        SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
        SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
@@ -7111,6 +7133,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
        {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
        {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
        {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
+       {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
        {}
 };
 #define ALC225_STANDARD_PINS \
@@ -7331,6 +7354,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x90a70130},
                {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x03211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x21, 0x04211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
+               {0x1a, 0x90a70130},
+               {0x1b, 0x90170110},
+               {0x21, 0x03211020}),
        SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
                {0x12, 0xb7a60130},
                {0x13, 0xb8a61140},
index 3c38ac9a92a7c4b18cbb4ac49ac60bf887b03d20..929c8e537a14a517c0a3c7ca5b6b15353d622c30 100644 (file)
@@ -502,16 +502,6 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
- *     Description
- *             Push an element *value* in *map*. *flags* is one of:
- *
- *             **BPF_EXIST**
- *             If the queue/stack is full, the oldest element is removed to
- *             make room for this.
- *     Return
- *             0 on success, or a negative error in case of failure.
- *
  * int bpf_probe_read(void *dst, u32 size, const void *src)
  *     Description
  *             For tracing programs, safely attempt to read *size* bytes from
@@ -1435,14 +1425,14 @@ union bpf_attr {
  * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_addr** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_addr** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
  * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
  *     Description
  *             Equivalent to bpf_get_socket_cookie() helper that accepts
- *             *skb*, but gets socket from **struct bpf_sock_ops** contex.
+ *             *skb*, but gets socket from **struct bpf_sock_ops** context.
  *     Return
  *             A 8-byte long non-decreasing number.
  *
@@ -2098,52 +2088,52 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
- * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
+ * int bpf_rc_repeat(void *ctx)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded key press with *scancode*,
- *             *toggle* value in the given *protocol*. The scancode will be
- *             translated to a keycode using the rc keymap, and reported as
- *             an input key down event. After a period a key up event is
- *             generated. This period can be extended by calling either
- *             **bpf_rc_keydown**\ () again with the same values, or calling
- *             **bpf_rc_repeat**\ ().
+ *             report a successfully decoded repeat key message. This delays
+ *             the generation of a key up event for previously generated
+ *             key down event.
  *
- *             Some protocols include a toggle bit, in case the button was
- *             released and pressed again between consecutive scancodes.
+ *             Some IR protocols like NEC have a special IR message for
+ *             repeating last button, for when a button is held down.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
- *             The *protocol* is the decoded protocol number (see
- *             **enum rc_proto** for some predefined values).
- *
 *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * int bpf_rc_repeat(void *ctx)
+ * int bpf_rc_keydown(void *ctx, u32 protocol, u64 scancode, u32 toggle)
  *     Description
  *             This helper is used in programs implementing IR decoding, to
- *             report a successfully decoded repeat key message. This delays
- *             the generation of a key up event for previously generated
- *             key down event.
+ *             report a successfully decoded key press with *scancode*,
+ *             *toggle* value in the given *protocol*. The scancode will be
+ *             translated to a keycode using the rc keymap, and reported as
+ *             an input key down event. After a period a key up event is
+ *             generated. This period can be extended by calling either
+ *             **bpf_rc_keydown**\ () again with the same values, or calling
+ *             **bpf_rc_repeat**\ ().
  *
- *             Some IR protocols like NEC have a special IR message for
- *             repeating last button, for when a button is held down.
+ *             Some protocols include a toggle bit, in case the button was
+ *             released and pressed again between consecutive scancodes.
  *
  *             The *ctx* should point to the lirc sample as passed into
  *             the program.
  *
+ *             The *protocol* is the decoded protocol number (see
+ *             **enum rc_proto** for some predefined values).
+ *
 *             This helper is only available if the kernel was compiled with
  *             the **CONFIG_BPF_LIRC_MODE2** configuration option set to
  *             "**y**".
  *     Return
  *             0
  *
- * uint64_t bpf_skb_cgroup_id(struct sk_buff *skb)
+ * u64 bpf_skb_cgroup_id(struct sk_buff *skb)
  *     Description
  *             Return the cgroup v2 id of the socket associated with the *skb*.
  *             This is roughly similar to the **bpf_get_cgroup_classid**\ ()
@@ -2159,30 +2149,12 @@ union bpf_attr {
  *     Return
  *             The id is returned or 0 in case the id could not be retrieved.
  *
- * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
- *     Description
- *             Return id of cgroup v2 that is ancestor of cgroup associated
- *             with the *skb* at the *ancestor_level*.  The root cgroup is at
- *             *ancestor_level* zero and each step down the hierarchy
- *             increments the level. If *ancestor_level* == level of cgroup
- *             associated with *skb*, then return value will be same as that
- *             of **bpf_skb_cgroup_id**\ ().
- *
- *             The helper is useful to implement policies based on cgroups
- *             that are upper in hierarchy than immediate cgroup associated
- *             with *skb*.
- *
- *             The format of returned id and helper limitations are same as in
- *             **bpf_skb_cgroup_id**\ ().
- *     Return
- *             The id is returned or 0 in case the id could not be retrieved.
- *
  * u64 bpf_get_current_cgroup_id(void)
  *     Return
  *             A 64-bit integer containing the current cgroup id based
  *             on the cgroup within which the current task is running.
  *
- * void* get_local_storage(void *map, u64 flags)
+ * void *bpf_get_local_storage(void *map, u64 flags)
  *     Description
  *             Get the pointer to the local storage area.
  *             The type and the size of the local storage is defined
@@ -2209,6 +2181,24 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ *     Description
+ *             Return id of cgroup v2 that is ancestor of cgroup associated
+ *             with the *skb* at the *ancestor_level*.  The root cgroup is at
+ *             *ancestor_level* zero and each step down the hierarchy
+ *             increments the level. If *ancestor_level* == level of cgroup
+ *             associated with *skb*, then return value will be same as that
+ *             of **bpf_skb_cgroup_id**\ ().
+ *
+ *             The helper is useful to implement policies based on cgroups
+ *             that are upper in hierarchy than immediate cgroup associated
+ *             with *skb*.
+ *
+ *             The format of returned id and helper limitations are same as in
+ *             **bpf_skb_cgroup_id**\ ().
+ *     Return
+ *             The id is returned or 0 in case the id could not be retrieved.
+ *
  * struct bpf_sock *bpf_sk_lookup_tcp(void *ctx, struct bpf_sock_tuple *tuple, u32 tuple_size, u64 netns, u64 flags)
  *     Description
  *             Look for TCP socket matching *tuple*, optionally in a child
@@ -2289,6 +2279,16 @@ union bpf_attr {
  *     Return
  *             0 on success, or a negative error in case of failure.
  *
+ * int bpf_map_push_elem(struct bpf_map *map, const void *value, u64 flags)
+ *     Description
+ *             Push an element *value* in *map*. *flags* is one of:
+ *
+ *             **BPF_EXIST**
+ *                     If the queue/stack is full, the oldest element is
+ *                     removed to make room for this.
+ *     Return
+ *             0 on success, or a negative error in case of failure.
+ *
  * int bpf_map_pop_elem(struct bpf_map *map, void *value)
  *     Description
  *             Pop an element from *map*.
@@ -2343,29 +2343,94 @@ union bpf_attr {
  *     Return
  *             0
  *
+ * int bpf_spin_lock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Acquire a spinlock represented by the pointer *lock*, which is
+ *             stored as part of a value of a map. Taking the lock allows to
+ *             safely update the rest of the fields in that value. The
+ *             spinlock can (and must) later be released with a call to
+ *             **bpf_spin_unlock**\ (\ *lock*\ ).
+ *
+ *             Spinlocks in BPF programs come with a number of restrictions
+ *             and constraints:
+ *
+ *             * **bpf_spin_lock** objects are only allowed inside maps of
+ *               types **BPF_MAP_TYPE_HASH** and **BPF_MAP_TYPE_ARRAY** (this
+ *               list could be extended in the future).
+ *             * BTF description of the map is mandatory.
+ *             * The BPF program can take ONE lock at a time, since taking two
+ *               or more could cause dead locks.
+ *             * Only one **struct bpf_spin_lock** is allowed per map element.
+ *             * When the lock is taken, calls (either BPF to BPF or helpers)
+ *               are not allowed.
+ *             * The **BPF_LD_ABS** and **BPF_LD_IND** instructions are not
+ *               allowed inside a spinlock-ed region.
+ *             * The BPF program MUST call **bpf_spin_unlock**\ () to release
+ *               the lock, on all execution paths, before it returns.
+ *             * The BPF program can access **struct bpf_spin_lock** only via
+ *               the **bpf_spin_lock**\ () and **bpf_spin_unlock**\ ()
+ *               helpers. Loading or storing data into the **struct
+ *               bpf_spin_lock** *lock*\ **;** field of a map is not allowed.
+ *             * To use the **bpf_spin_lock**\ () helper, the BTF description
+ *               of the map value must be a struct and have **struct
+ *               bpf_spin_lock** *anyname*\ **;** field at the top level.
+ *               Nested lock inside another struct is not allowed.
+ *             * The **struct bpf_spin_lock** *lock* field in a map value must
+ *               be aligned on a multiple of 4 bytes in that value.
+ *             * Syscall with command **BPF_MAP_LOOKUP_ELEM** does not copy
+ *               the **bpf_spin_lock** field to user space.
+ *             * Syscall with command **BPF_MAP_UPDATE_ELEM**, or update from
+ *               a BPF program, do not update the **bpf_spin_lock** field.
+ *             * **bpf_spin_lock** cannot be on the stack or inside a
+ *               networking packet (it can only be inside of a map values).
+ *             * **bpf_spin_lock** is available to root only.
+ *             * Tracing programs and socket filter programs cannot use
+ *               **bpf_spin_lock**\ () due to insufficient preemption checks
+ *               (but this may change in the future).
+ *             * **bpf_spin_lock** is not allowed in inner maps of map-in-map.
+ *     Return
+ *             0
+ *
+ * int bpf_spin_unlock(struct bpf_spin_lock *lock)
+ *     Description
+ *             Release the *lock* previously locked by a call to
+ *             **bpf_spin_lock**\ (\ *lock*\ ).
+ *     Return
+ *             0
+ *
  * struct bpf_sock *bpf_sk_fullsock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_sock** pointer such
- *             that all the fields in bpf_sock can be accessed.
+ *             that all the fields in this **bpf_sock** can be accessed.
  *     Return
- *             A **struct bpf_sock** pointer on success, or NULL in
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * struct bpf_tcp_sock *bpf_tcp_sock(struct bpf_sock *sk)
  *     Description
  *             This helper gets a **struct bpf_tcp_sock** pointer from a
  *             **struct bpf_sock** pointer.
- *
  *     Return
- *             A **struct bpf_tcp_sock** pointer on success, or NULL in
+ *             A **struct bpf_tcp_sock** pointer on success, or **NULL** in
  *             case of failure.
  *
  * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
- *     Description
- *             Sets ECN of IP header to ce (congestion encountered) if
- *             current value is ect (ECN capable). Works with IPv6 and IPv4.
- *     Return
- *             1 if set, 0 if not set.
+ *     Description
+ *             Set ECN (Explicit Congestion Notification) field of IP header
+ *             to **CE** (Congestion Encountered) if current value is **ECT**
+ *             (ECN Capable Transport). Otherwise, do nothing. Works with IPv6
+ *             and IPv4.
+ *     Return
+ *             1 if the **CE** flag is set (either by the current helper call
+ *             or because it was already present), 0 if it is not set.
+ *
+ * struct bpf_sock *bpf_get_listener_sock(struct bpf_sock *sk)
+ *     Description
+ *             Return a **struct bpf_sock** pointer in **TCP_LISTEN** state.
+ *             **bpf_sk_release**\ () is unnecessary and not allowed.
+ *     Return
+ *             A **struct bpf_sock** pointer on success, or **NULL** in
+ *             case of failure.
  */
 #define __BPF_FUNC_MAPPER(FN)          \
        FN(unspec),                     \
@@ -2465,7 +2530,8 @@ union bpf_attr {
        FN(spin_unlock),                \
        FN(sk_fullsock),                \
        FN(tcp_sock),                   \
-       FN(skb_ecn_set_ce),
+       FN(skb_ecn_set_ce),             \
+       FN(get_listener_sock),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
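
The helper documentation added above (bpf_spin_lock()/bpf_spin_unlock(), bpf_get_listener_sock(), the relocated bpf_map_push_elem()) is easier to digest next to a program that obeys the spin-lock rules. A minimal selftests-style sketch under stated assumptions: it relies on the bpf_helpers.h declarations of this tree, and a real program additionally needs BTF for the map value (the selftests of this era provide it via the BPF_ANNOTATE_KV_PAIR macro):

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct counter_val {
	struct bpf_spin_lock lock;	/* exactly one lock, at the top level */
	int count;
};

struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_ARRAY,	/* spin locks: ARRAY or HASH values only */
	.key_size = sizeof(__u32),
	.value_size = sizeof(struct counter_val),
	.max_entries = 1,
};

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	__u32 key = 0;
	struct counter_val *val = bpf_map_lookup_elem(&counters, &key);

	if (!val)
		return 1;

	bpf_spin_lock(&val->lock);	/* no helper or BPF-to-BPF calls while held */
	val->count++;
	bpf_spin_unlock(&val->lock);	/* released on every path before returning */

	return 1;
}

char _license[] SEC("license") = "GPL";
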
index 61aaacf0cfa153bd8e798b2a028fa2832cc1ab93..5bf8e52c41fcaf2bb38127d4bb076a9164539ddb 100644 (file)
@@ -3,7 +3,7 @@
 
 BPF_VERSION = 0
 BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 1
+BPF_EXTRAVERSION = 2
 
 MAKEFLAGS += --no-print-directory
 
@@ -79,8 +79,6 @@ export prefix libdir src obj
 libdir_SQ = $(subst ','\'',$(libdir))
 libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
 
-LIB_FILE = libbpf.a libbpf.so
-
 VERSION                = $(BPF_VERSION)
 PATCHLEVEL     = $(BPF_PATCHLEVEL)
 EXTRAVERSION   = $(BPF_EXTRAVERSION)
@@ -88,7 +86,10 @@ EXTRAVERSION = $(BPF_EXTRAVERSION)
 OBJ            = $@
 N              =
 
-LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION)
+
+LIB_TARGET     = libbpf.a libbpf.so.$(LIBBPF_VERSION)
+LIB_FILE       = libbpf.a libbpf.so*
 
 # Set compile option CFLAGS
 ifdef EXTRA_CFLAGS
@@ -128,16 +129,18 @@ all:
 export srctree OUTPUT CC LD CFLAGS V
 include $(srctree)/tools/build/Makefile.include
 
-BPF_IN    := $(OUTPUT)libbpf-in.o
-LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
-VERSION_SCRIPT := libbpf.map
+BPF_IN         := $(OUTPUT)libbpf-in.o
+VERSION_SCRIPT := libbpf.map
+
+LIB_TARGET     := $(addprefix $(OUTPUT),$(LIB_TARGET))
+LIB_FILE       := $(addprefix $(OUTPUT),$(LIB_FILE))
 
 GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \
                           awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}')
 VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \
                              grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
 
-CMD_TARGETS = $(LIB_FILE)
+CMD_TARGETS = $(LIB_TARGET)
 
 CXX_TEST_TARGET = $(OUTPUT)test_libbpf
 
@@ -170,9 +173,13 @@ $(BPF_IN): force elfdep bpfdep
        echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true
        $(Q)$(MAKE) $(build)=libbpf
 
-$(OUTPUT)libbpf.so: $(BPF_IN)
-       $(QUIET_LINK)$(CC) --shared -Wl,--version-script=$(VERSION_SCRIPT) \
-               $^ -o $@
+$(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
+
+$(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
+       $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
+                                   -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+       @ln -sf $(@F) $(OUTPUT)libbpf.so
+       @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
 
 $(OUTPUT)libbpf.a: $(BPF_IN)
        $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
@@ -192,6 +199,12 @@ check_abi: $(OUTPUT)libbpf.so
                exit 1;                                                  \
        fi
 
+define do_install_mkdir
+       if [ ! -d '$(DESTDIR_SQ)$1' ]; then             \
+               $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \
+       fi
+endef
+
 define do_install
        if [ ! -d '$(DESTDIR_SQ)$2' ]; then             \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
@@ -200,8 +213,9 @@ define do_install
 endef
 
 install_lib: all_cmd
-       $(call QUIET_INSTALL, $(LIB_FILE)) \
-               $(call do_install,$(LIB_FILE),$(libdir_SQ))
+       $(call QUIET_INSTALL, $(LIB_TARGET)) \
+               $(call do_install_mkdir,$(libdir_SQ)); \
+               cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ)
 
 install_headers:
        $(call QUIET_INSTALL, headers) \
@@ -219,7 +233,7 @@ config-clean:
 
 clean:
        $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \
-               *.o *~ *.a *.so .*.d .*.cmd LIBBPF-CFLAGS
+               *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd LIBBPF-CFLAGS
        $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf
 
 
index 5788479384cad11141cb77b1295eea26ca91bc09..cef7b77eab69507bcafa8cfe99b68cabb4f4ac78 100644 (file)
@@ -111,6 +111,7 @@ starting from ``0.0.1``.
 
 Every time ABI is being changed, e.g. because a new symbol is added or
 semantic of existing symbol is changed, ABI version should be bumped.
+This bump in ABI version is at most once per kernel development cycle.
 
 For example, if current state of ``libbpf.map`` is:
 
index 1b8d8cdd35750f7b9fc4f2fffb044f300e6bade9..87e3020ac1bc8b3772d98ce58751fc2d6f979184 100644 (file)
@@ -1602,16 +1602,12 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
 /* Calculate type signature hash of ENUM. */
 static __u32 btf_hash_enum(struct btf_type *t)
 {
-       struct btf_enum *member = (struct btf_enum *)(t + 1);
-       __u32 vlen = BTF_INFO_VLEN(t->info);
-       __u32 h = btf_hash_common(t);
-       int i;
+       __u32 h;
 
-       for (i = 0; i < vlen; i++) {
-               h = hash_combine(h, member->name_off);
-               h = hash_combine(h, member->val);
-               member++;
-       }
+       /* don't hash vlen and enum members to support enum fwd resolving */
+       h = hash_combine(0, t->name_off);
+       h = hash_combine(h, t->info & ~0xffff);
+       h = hash_combine(h, t->size);
        return h;
 }
 
@@ -1637,6 +1633,22 @@ static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
        return true;
 }
 
+static inline bool btf_is_enum_fwd(struct btf_type *t)
+{
+       return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM &&
+              BTF_INFO_VLEN(t->info) == 0;
+}
+
+static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
+{
+       if (!btf_is_enum_fwd(t1) && !btf_is_enum_fwd(t2))
+               return btf_equal_enum(t1, t2);
+       /* ignore vlen when comparing */
+       return t1->name_off == t2->name_off &&
+              (t1->info & ~0xffff) == (t2->info & ~0xffff) &&
+              t1->size == t2->size;
+}
+
 /*
  * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
  * as referenced type IDs equivalence is established separately during type
@@ -1860,6 +1872,17 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
                                new_id = cand_node->type_id;
                                break;
                        }
+                       if (d->opts.dont_resolve_fwds)
+                               continue;
+                       if (btf_compat_enum(t, cand)) {
+                               if (btf_is_enum_fwd(t)) {
+                                       /* resolve fwd to full enum */
+                                       new_id = cand_node->type_id;
+                                       break;
+                               }
+                               /* resolve canonical enum fwd to full enum */
+                               d->map[cand_node->type_id] = type_id;
+                       }
                }
                break;
 
@@ -2084,15 +2107,15 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
                return fwd_kind == real_kind;
        }
 
-       if (cand_type->info != canon_type->info)
-               return 0;
-
        switch (cand_kind) {
        case BTF_KIND_INT:
                return btf_equal_int(cand_type, canon_type);
 
        case BTF_KIND_ENUM:
-               return btf_equal_enum(cand_type, canon_type);
+               if (d->opts.dont_resolve_fwds)
+                       return btf_equal_enum(cand_type, canon_type);
+               else
+                       return btf_compat_enum(cand_type, canon_type);
 
        case BTF_KIND_FWD:
                return btf_equal_common(cand_type, canon_type);
@@ -2103,6 +2126,8 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
        case BTF_KIND_PTR:
        case BTF_KIND_TYPEDEF:
        case BTF_KIND_FUNC:
+               if (cand_type->info != canon_type->info)
+                       return 0;
                return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
 
        case BTF_KIND_ARRAY: {
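
Both btf_hash_enum() and btf_compat_enum() above lean on the BTF type-info layout: the member count (vlen) lives in the low 16 bits of btf_type.info and the kind sits higher up, so masking with ~0xffff compares "same enum, ignoring how many members are present", which is exactly what forward-declaration resolution needs. A small standalone illustration (the kind value and bit layout are quoted from uapi/linux/btf.h as recalled, so treat them as assumptions):

#include <stdint.h>
#include <stdio.h>

#define BTF_KIND_ENUM		6
#define BTF_INFO(kind, vlen)	(((uint32_t)(kind) << 24) | ((vlen) & 0xffff))

int main(void)
{
	uint32_t fwd  = BTF_INFO(BTF_KIND_ENUM, 0);	/* fwd enum, no members */
	uint32_t full = BTF_INFO(BTF_KIND_ENUM, 1);	/* full enum, one member */

	/* The comparison btf_compat_enum() performs on the info word:
	 * identical once the vlen bits are masked out. */
	printf("compatible: %d\n", (fwd & ~0xffffu) == (full & ~0xffffu));
	return 0;
}
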
index e6ad87512519f559156b74f856b59c4855ee5c6e..11c25d9ea43124fc6e67fab5a7dc8b93f5dc9e4a 100644 (file)
@@ -840,12 +840,19 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
                        obj->efile.maps_shndx = idx;
                else if (strcmp(name, BTF_ELF_SEC) == 0) {
                        obj->btf = btf__new(data->d_buf, data->d_size);
-                       if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
+                       if (IS_ERR(obj->btf)) {
                                pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
                                           BTF_ELF_SEC, PTR_ERR(obj->btf));
-                               if (!IS_ERR(obj->btf))
-                                       btf__free(obj->btf);
                                obj->btf = NULL;
+                               continue;
+                       }
+                       err = btf__load(obj->btf);
+                       if (err) {
+                               pr_warning("Error loading %s into kernel: %d. Ignored and continue.\n",
+                                          BTF_ELF_SEC, err);
+                               btf__free(obj->btf);
+                               obj->btf = NULL;
+                               err = 0;
                        }
                } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
                        btf_ext_data = data;
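
The libbpf.c hunk splits "the .BTF section could not be parsed" from "the kernel refused to load it", so each failure gets an accurate message and its own cleanup while BTF stays optional either way. A hedged application-side sketch of the same two-step pattern (btf__new(), btf__load() and btf__free() are the libbpf calls used in the hunk; IS_ERR() is the tools-tree helper libbpf itself uses here, and an out-of-tree application would more likely check libbpf_get_error()):

#include <linux/err.h>
#include <linux/types.h>
#include <bpf/btf.h>

/* Parse raw .BTF data and load it into the kernel; both failures are treated
 * as non-fatal by returning NULL so the caller can continue without BTF. */
static struct btf *parse_and_load_btf(const void *data, __u32 size)
{
	struct btf *btf = btf__new(data, size);

	if (IS_ERR(btf))		/* malformed section: nothing to free */
		return NULL;

	if (btf__load(btf)) {		/* kernel rejected it: drop BTF, carry on */
		btf__free(btf);
		return NULL;
	}
	return btf;
}
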
index f98ac82c9aea51fa5e0d9314c8673f0c0771e0a9..8d0078b65486f45730f3d967c84a6709afb23cdb 100644 (file)
@@ -126,8 +126,8 @@ static void xsk_set_umem_config(struct xsk_umem_config *cfg,
        cfg->frame_headroom = usr_cfg->frame_headroom;
 }
 
-static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
-                                     const struct xsk_socket_config *usr_cfg)
+static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
+                                    const struct xsk_socket_config *usr_cfg)
 {
        if (!usr_cfg) {
                cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
@@ -135,14 +135,19 @@ static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
                cfg->libbpf_flags = 0;
                cfg->xdp_flags = 0;
                cfg->bind_flags = 0;
-               return;
+               return 0;
        }
 
+       if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
+               return -EINVAL;
+
        cfg->rx_size = usr_cfg->rx_size;
        cfg->tx_size = usr_cfg->tx_size;
        cfg->libbpf_flags = usr_cfg->libbpf_flags;
        cfg->xdp_flags = usr_cfg->xdp_flags;
        cfg->bind_flags = usr_cfg->bind_flags;
+
+       return 0;
 }
 
 int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
@@ -557,7 +562,9 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
        }
        strncpy(xsk->ifname, ifname, IFNAMSIZ);
 
-       xsk_set_xdp_socket_config(&xsk->config, usr_config);
+       err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
+       if (err)
+               goto out_socket;
 
        if (rx) {
                err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
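
With xsk_set_xdp_socket_config() now returning an error for unknown libbpf_flags bits, the failure surfaces to the caller of xsk_socket__create() instead of the bogus flag being silently ignored. A hedged caller-side sketch (the interface name and queue id are placeholders, umem/rx/tx are assumed to have been set up by the caller, and the xsk_socket__create() parameter order is quoted from tools/lib/bpf/xsk.h as recalled):

#include <stdio.h>
#include <bpf/xsk.h>

static int create_af_xdp_socket(struct xsk_socket **xsk, struct xsk_umem *umem,
				struct xsk_ring_cons *rx, struct xsk_ring_prod *tx)
{
	const struct xsk_socket_config cfg = {
		.rx_size      = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size      = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		.libbpf_flags = 1u << 15,	/* not a known XSK_LIBBPF_FLAGS__* bit */
	};
	int err = xsk_socket__create(xsk, "eth0", 0, umem, rx, tx, &cfg);

	if (err)	/* -EINVAL after this change, not a misconfigured socket */
		fprintf(stderr, "xsk_socket__create failed: %d\n", err);
	return err;
}
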
index c9d038f91af6b345044bb116680a66f140d2ead1..53f8be0f4a1f763e613b649aeac98399bd34eb69 100644 (file)
@@ -25,14 +25,17 @@ LIBSUBCMD           = $(LIBSUBCMD_OUTPUT)libsubcmd.a
 OBJTOOL    := $(OUTPUT)objtool
 OBJTOOL_IN := $(OBJTOOL)-in.o
 
+LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
+LIBELF_LIBS  := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
+
 all: $(OBJTOOL)
 
 INCLUDES := -I$(srctree)/tools/include \
            -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
            -I$(srctree)/tools/objtool/arch/$(ARCH)/include
 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
-CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
-LDFLAGS  += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
+CFLAGS   += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
+LDFLAGS  += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
 
 # Allow old libelf to be used:
 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
index c9433a496d548daf9417a1f19af71753f93547c0..c81fc350f7ad46ad60d53ac3dd8121059020f9a6 100644 (file)
@@ -180,6 +180,8 @@ static struct bpf_sock *(*bpf_sk_fullsock)(struct bpf_sock *sk) =
        (void *) BPF_FUNC_sk_fullsock;
 static struct bpf_tcp_sock *(*bpf_tcp_sock)(struct bpf_sock *sk) =
        (void *) BPF_FUNC_tcp_sock;
+static struct bpf_sock *(*bpf_get_listener_sock)(struct bpf_sock *sk) =
+       (void *) BPF_FUNC_get_listener_sock;
 static int (*bpf_skb_ecn_set_ce)(void *ctx) =
        (void *) BPF_FUNC_skb_ecn_set_ce;
 
index 90f8a206340ab4daa61c401144d45cbec6b15a63..ee99368c595ca0b0768ad7938212bc80977bacf1 100644 (file)
@@ -37,7 +37,7 @@ void test_map_lock(void)
        const char *file = "./test_map_lock.o";
        int prog_fd, map_fd[2], vars[17] = {};
        pthread_t thread_id[6];
-       struct bpf_object *obj;
+       struct bpf_object *obj = NULL;
        int err = 0, key = 0, i;
        void *ret;
 
index 9a573a9675d74beee07fc6025f1504682c394c21..114ebe6a438e562d864971a5a5d174b1e0936f8a 100644 (file)
@@ -5,7 +5,7 @@ void test_spinlock(void)
 {
        const char *file = "./test_spin_lock.o";
        pthread_t thread_id[4];
-       struct bpf_object *obj;
+       struct bpf_object *obj = NULL;
        int prog_fd;
        int err = 0, i;
        void *ret;
index de1a43e8f61070220c511b9aae698e4de7fb0659..37328f1485384b756c3120f428a3348206a9eb47 100644 (file)
@@ -8,38 +8,51 @@
 #include "bpf_helpers.h"
 #include "bpf_endian.h"
 
-enum bpf_array_idx {
-       SRV_IDX,
-       CLI_IDX,
-       __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+       ADDR_SRV_IDX,
+       ADDR_CLI_IDX,
+       __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+       EGRESS_SRV_IDX,
+       EGRESS_CLI_IDX,
+       INGRESS_LISTEN_IDX,
+       __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+       EGRESS_LINUM_IDX,
+       INGRESS_LINUM_IDX,
+       __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") addr_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct sockaddr_in6),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_ADDR_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") sock_result_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct bpf_sock),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") tcp_sock_result_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct bpf_tcp_sock),
-       .max_entries = __NR_BPF_ARRAY_IDX,
+       .max_entries = __NR_BPF_RESULT_ARRAY_IDX,
 };
 
 struct bpf_map_def SEC("maps") linum_map = {
        .type = BPF_MAP_TYPE_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(__u32),
-       .max_entries = 1,
+       .max_entries = __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 static bool is_loopback6(__u32 *a6)
@@ -100,18 +113,20 @@ static void tpcpy(struct bpf_tcp_sock *dst,
 
 #define RETURN {                                               \
        linum = __LINE__;                                       \
-       bpf_map_update_elem(&linum_map, &idx0, &linum, 0);      \
+       bpf_map_update_elem(&linum_map, &linum_idx, &linum, 0); \
        return 1;                                               \
 }
 
 SEC("cgroup_skb/egress")
-int read_sock_fields(struct __sk_buff *skb)
+int egress_read_sock_fields(struct __sk_buff *skb)
 {
-       __u32 srv_idx = SRV_IDX, cli_idx = CLI_IDX, idx;
+       __u32 srv_idx = ADDR_SRV_IDX, cli_idx = ADDR_CLI_IDX, result_idx;
        struct sockaddr_in6 *srv_sa6, *cli_sa6;
        struct bpf_tcp_sock *tp, *tp_ret;
        struct bpf_sock *sk, *sk_ret;
-       __u32 linum, idx0 = 0;
+       __u32 linum, linum_idx;
+
+       linum_idx = EGRESS_LINUM_IDX;
 
        sk = skb->sk;
        if (!sk || sk->state == 10)
@@ -132,14 +147,55 @@ int read_sock_fields(struct __sk_buff *skb)
                RETURN;
 
        if (sk->src_port == bpf_ntohs(srv_sa6->sin6_port))
-               idx = srv_idx;
+               result_idx = EGRESS_SRV_IDX;
        else if (sk->src_port == bpf_ntohs(cli_sa6->sin6_port))
-               idx = cli_idx;
+               result_idx = EGRESS_CLI_IDX;
        else
                RETURN;
 
-       sk_ret = bpf_map_lookup_elem(&sock_result_map, &idx);
-       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &idx);
+       sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
+       if (!sk_ret || !tp_ret)
+               RETURN;
+
+       skcpy(sk_ret, sk);
+       tpcpy(tp_ret, tp);
+
+       RETURN;
+}
+
+SEC("cgroup_skb/ingress")
+int ingress_read_sock_fields(struct __sk_buff *skb)
+{
+       __u32 srv_idx = ADDR_SRV_IDX, result_idx = INGRESS_LISTEN_IDX;
+       struct bpf_tcp_sock *tp, *tp_ret;
+       struct bpf_sock *sk, *sk_ret;
+       struct sockaddr_in6 *srv_sa6;
+       __u32 linum, linum_idx;
+
+       linum_idx = INGRESS_LINUM_IDX;
+
+       sk = skb->sk;
+       if (!sk || sk->family != AF_INET6 || !is_loopback6(sk->src_ip6))
+               RETURN;
+
+       srv_sa6 = bpf_map_lookup_elem(&addr_map, &srv_idx);
+       if (!srv_sa6 || sk->src_port != bpf_ntohs(srv_sa6->sin6_port))
+               RETURN;
+
+       if (sk->state != 10 && sk->state != 12)
+               RETURN;
+
+       sk = bpf_get_listener_sock(sk);
+       if (!sk)
+               RETURN;
+
+       tp = bpf_tcp_sock(sk);
+       if (!tp)
+               RETURN;
+
+       sk_ret = bpf_map_lookup_elem(&sock_result_map, &result_idx);
+       tp_ret = bpf_map_lookup_elem(&tcp_sock_result_map, &result_idx);
        if (!sk_ret || !tp_ret)
                RETURN;
 
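
For readers of the two programs above: the raw socket-state values they compare against come from the BPF_TCP_* enum in uapi/linux/bpf.h (values quoted from memory, so treat as an assumption). Written with the named constants, the ingress check reads:

	/* 10 == BPF_TCP_LISTEN, 12 == BPF_TCP_NEW_SYN_RECV */
	if (sk->state != BPF_TCP_LISTEN && sk->state != BPF_TCP_NEW_SYN_RECV)
		RETURN;
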
index 38797aa627a732f31d333aaa6cf8020ceb3a211d..23e3b314ca603956ce88ed4ac8f1512eeddfa34f 100644 (file)
@@ -5874,6 +5874,50 @@ const struct btf_dedup_test dedup_tests[] = {
                .dont_resolve_fwds = false,
        },
 },
+{
+       .descr = "dedup: enum fwd resolution",
+       .input = {
+               .raw_types = {
+                       /* [1] fwd enum 'e1' before full enum */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+                       /* [2] full enum 'e1' after fwd */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 123),
+                       /* [3] full enum 'e2' before fwd */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(4), 456),
+                       /* [4] fwd enum 'e2' after full enum */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 4),
+                       /* [5] incompatible fwd enum with different size */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+                       /* [6] incompatible full enum with different value */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 321),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+       },
+       .expect = {
+               .raw_types = {
+                       /* [1] full enum 'e1' */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 123),
+                       /* [2] full enum 'e2' */
+                       BTF_TYPE_ENC(NAME_NTH(3), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(4), 456),
+                       /* [3] incompatible fwd enum with different size */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 0), 1),
+                       /* [4] incompatible full enum with different value */
+                       BTF_TYPE_ENC(NAME_NTH(1), BTF_INFO_ENC(BTF_KIND_ENUM, 0, 1), 4),
+                               BTF_ENUM_ENC(NAME_NTH(2), 321),
+                       BTF_END_RAW,
+               },
+               BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
+       },
+       .opts = {
+               .dont_resolve_fwds = false,
+       },
+},
 
 };
 
index bc8943938bf53933577c9be075c2f35314708eb3..dcae7f664dce0827f5ddda4bc5f693d191def9b0 100644 (file)
 #include "cgroup_helpers.h"
 #include "bpf_rlimit.h"
 
-enum bpf_array_idx {
-       SRV_IDX,
-       CLI_IDX,
-       __NR_BPF_ARRAY_IDX,
+enum bpf_addr_array_idx {
+       ADDR_SRV_IDX,
+       ADDR_CLI_IDX,
+       __NR_BPF_ADDR_ARRAY_IDX,
+};
+
+enum bpf_result_array_idx {
+       EGRESS_SRV_IDX,
+       EGRESS_CLI_IDX,
+       INGRESS_LISTEN_IDX,
+       __NR_BPF_RESULT_ARRAY_IDX,
+};
+
+enum bpf_linum_array_idx {
+       EGRESS_LINUM_IDX,
+       INGRESS_LINUM_IDX,
+       __NR_BPF_LINUM_ARRAY_IDX,
 };
 
 #define CHECK(condition, tag, format...) ({                            \
@@ -41,8 +54,16 @@ static int linum_map_fd;
 static int addr_map_fd;
 static int tp_map_fd;
 static int sk_map_fd;
-static __u32 srv_idx = SRV_IDX;
-static __u32 cli_idx = CLI_IDX;
+
+static __u32 addr_srv_idx = ADDR_SRV_IDX;
+static __u32 addr_cli_idx = ADDR_CLI_IDX;
+
+static __u32 egress_srv_idx = EGRESS_SRV_IDX;
+static __u32 egress_cli_idx = EGRESS_CLI_IDX;
+static __u32 ingress_listen_idx = INGRESS_LISTEN_IDX;
+
+static __u32 egress_linum_idx = EGRESS_LINUM_IDX;
+static __u32 ingress_linum_idx = INGRESS_LINUM_IDX;
 
 static void init_loopback6(struct sockaddr_in6 *sa6)
 {
@@ -93,29 +114,46 @@ static void print_tp(const struct bpf_tcp_sock *tp)
 
 static void check_result(void)
 {
-       struct bpf_tcp_sock srv_tp, cli_tp;
-       struct bpf_sock srv_sk, cli_sk;
-       __u32 linum, idx0 = 0;
+       struct bpf_tcp_sock srv_tp, cli_tp, listen_tp;
+       struct bpf_sock srv_sk, cli_sk, listen_sk;
+       __u32 ingress_linum, egress_linum;
        int err;
 
-       err = bpf_map_lookup_elem(linum_map_fd, &idx0, &linum);
+       err = bpf_map_lookup_elem(linum_map_fd, &egress_linum_idx,
+                                 &egress_linum);
        CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
              "err:%d errno:%d", err, errno);
 
-       err = bpf_map_lookup_elem(sk_map_fd, &srv_idx, &srv_sk);
-       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &srv_idx)",
+       err = bpf_map_lookup_elem(linum_map_fd, &ingress_linum_idx,
+                                 &ingress_linum);
+       CHECK(err == -1, "bpf_map_lookup_elem(linum_map_fd)",
+             "err:%d errno:%d", err, errno);
+
+       err = bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx, &srv_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_srv_idx)",
+             "err:%d errno:%d", err, errno);
+       err = bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx, &srv_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_srv_idx)",
+             "err:%d errno:%d", err, errno);
+
+       err = bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx, &cli_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &egress_cli_idx)",
              "err:%d errno:%d", err, errno);
-       err = bpf_map_lookup_elem(tp_map_fd, &srv_idx, &srv_tp);
-       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &srv_idx)",
+       err = bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx, &cli_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &egress_cli_idx)",
              "err:%d errno:%d", err, errno);
 
-       err = bpf_map_lookup_elem(sk_map_fd, &cli_idx, &cli_sk);
-       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &cli_idx)",
+       err = bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx, &listen_sk);
+       CHECK(err == -1, "bpf_map_lookup_elem(sk_map_fd, &ingress_listen_idx)",
              "err:%d errno:%d", err, errno);
-       err = bpf_map_lookup_elem(tp_map_fd, &cli_idx, &cli_tp);
-       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &cli_idx)",
+       err = bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx, &listen_tp);
+       CHECK(err == -1, "bpf_map_lookup_elem(tp_map_fd, &ingress_listen_idx)",
              "err:%d errno:%d", err, errno);
 
+       printf("listen_sk: ");
+       print_sk(&listen_sk);
+       printf("\n");
+
        printf("srv_sk: ");
        print_sk(&srv_sk);
        printf("\n");
@@ -124,6 +162,10 @@ static void check_result(void)
        print_sk(&cli_sk);
        printf("\n");
 
+       printf("listen_tp: ");
+       print_tp(&listen_tp);
+       printf("\n");
+
        printf("srv_tp: ");
        print_tp(&srv_tp);
        printf("\n");
@@ -132,6 +174,19 @@ static void check_result(void)
        print_tp(&cli_tp);
        printf("\n");
 
+       CHECK(listen_sk.state != 10 ||
+             listen_sk.family != AF_INET6 ||
+             listen_sk.protocol != IPPROTO_TCP ||
+             memcmp(listen_sk.src_ip6, &in6addr_loopback,
+                    sizeof(listen_sk.src_ip6)) ||
+             listen_sk.dst_ip6[0] || listen_sk.dst_ip6[1] ||
+             listen_sk.dst_ip6[2] || listen_sk.dst_ip6[3] ||
+             listen_sk.src_port != ntohs(srv_sa6.sin6_port) ||
+             listen_sk.dst_port,
+             "Unexpected listen_sk",
+             "Check listen_sk output. ingress_linum:%u",
+             ingress_linum);
+
        CHECK(srv_sk.state == 10 ||
              !srv_sk.state ||
              srv_sk.family != AF_INET6 ||
@@ -142,7 +197,8 @@ static void check_result(void)
                     sizeof(srv_sk.dst_ip6)) ||
              srv_sk.src_port != ntohs(srv_sa6.sin6_port) ||
              srv_sk.dst_port != cli_sa6.sin6_port,
-             "Unexpected srv_sk", "Check srv_sk output. linum:%u", linum);
+             "Unexpected srv_sk", "Check srv_sk output. egress_linum:%u",
+             egress_linum);
 
        CHECK(cli_sk.state == 10 ||
              !cli_sk.state ||
@@ -154,21 +210,31 @@ static void check_result(void)
                     sizeof(cli_sk.dst_ip6)) ||
              cli_sk.src_port != ntohs(cli_sa6.sin6_port) ||
              cli_sk.dst_port != srv_sa6.sin6_port,
-             "Unexpected cli_sk", "Check cli_sk output. linum:%u", linum);
+             "Unexpected cli_sk", "Check cli_sk output. egress_linum:%u",
+             egress_linum);
+
+       CHECK(listen_tp.data_segs_out ||
+             listen_tp.data_segs_in ||
+             listen_tp.total_retrans ||
+             listen_tp.bytes_acked,
+             "Unexpected listen_tp", "Check listen_tp output. ingress_linum:%u",
+             ingress_linum);
 
        CHECK(srv_tp.data_segs_out != 1 ||
              srv_tp.data_segs_in ||
              srv_tp.snd_cwnd != 10 ||
              srv_tp.total_retrans ||
              srv_tp.bytes_acked != DATA_LEN,
-             "Unexpected srv_tp", "Check srv_tp output. linum:%u", linum);
+             "Unexpected srv_tp", "Check srv_tp output. egress_linum:%u",
+             egress_linum);
 
        CHECK(cli_tp.data_segs_out ||
              cli_tp.data_segs_in != 1 ||
              cli_tp.snd_cwnd != 10 ||
              cli_tp.total_retrans ||
              cli_tp.bytes_received != DATA_LEN,
-             "Unexpected cli_tp", "Check cli_tp output. linum:%u", linum);
+             "Unexpected cli_tp", "Check cli_tp output. egress_linum:%u",
+             egress_linum);
 }
 
 static void test(void)
@@ -211,10 +277,10 @@ static void test(void)
              err, errno);
 
        /* Update addr_map with srv_sa6 and cli_sa6 */
-       err = bpf_map_update_elem(addr_map_fd, &srv_idx, &srv_sa6, 0);
+       err = bpf_map_update_elem(addr_map_fd, &addr_srv_idx, &srv_sa6, 0);
        CHECK(err, "map_update", "err:%d errno:%d", err, errno);
 
-       err = bpf_map_update_elem(addr_map_fd, &cli_idx, &cli_sa6, 0);
+       err = bpf_map_update_elem(addr_map_fd, &addr_cli_idx, &cli_sa6, 0);
        CHECK(err, "map_update", "err:%d errno:%d", err, errno);
 
        /* Connect from cli_sa6 to srv_sa6 */
@@ -273,9 +339,9 @@ int main(int argc, char **argv)
        struct bpf_prog_load_attr attr = {
                .file = "test_sock_fields_kern.o",
                .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
-               .expected_attach_type = BPF_CGROUP_INET_EGRESS,
        };
-       int cgroup_fd, prog_fd, err;
+       int cgroup_fd, egress_fd, ingress_fd, err;
+       struct bpf_program *ingress_prog;
        struct bpf_object *obj;
        struct bpf_map *map;
 
@@ -293,12 +359,24 @@ int main(int argc, char **argv)
        err = join_cgroup(TEST_CGROUP);
        CHECK(err, "join_cgroup", "err:%d errno:%d", err, errno);
 
-       err = bpf_prog_load_xattr(&attr, &obj, &prog_fd);
+       err = bpf_prog_load_xattr(&attr, &obj, &egress_fd);
        CHECK(err, "bpf_prog_load_xattr()", "err:%d", err);
 
-       err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
+       ingress_prog = bpf_object__find_program_by_title(obj,
+                                                        "cgroup_skb/ingress");
+       CHECK(!ingress_prog,
+             "bpf_object__find_program_by_title(cgroup_skb/ingress)",
+             "not found");
+       ingress_fd = bpf_program__fd(ingress_prog);
+
+       err = bpf_prog_attach(egress_fd, cgroup_fd, BPF_CGROUP_INET_EGRESS, 0);
        CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_EGRESS)",
              "err:%d errno%d", err, errno);
+
+       err = bpf_prog_attach(ingress_fd, cgroup_fd,
+                             BPF_CGROUP_INET_INGRESS, 0);
+       CHECK(err == -1, "bpf_prog_attach(CPF_CGROUP_INET_INGRESS)",
+             "err:%d errno%d", err, errno);
        close(cgroup_fd);
 
        map = bpf_object__find_map_by_name(obj, "addr_map");
index 4004891afa9c3dd7969419bcd8d10d4b9b9541c8..f2ccae39ee66b32c8b60890dcacff0bd89c8abd0 100644 (file)
        .errstr = "!read_ok",
        .result = REJECT,
 },
+{
+       "calls: cross frame pruning - liveness propagation",
+       .insns = {
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_IMM(BPF_REG_8, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_8, 1),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+       BPF_MOV64_IMM(BPF_REG_9, 0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_MOV64_IMM(BPF_REG_9, 1),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1),
+       BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0),
+       BPF_MOV64_IMM(BPF_REG_0, 0),
+       BPF_EXIT_INSN(),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
+       .errstr_unpriv = "function calls to other bpf functions are allowed for root only",
+       .errstr = "!read_ok",
+       .result = REJECT,
+},
index 3ed3593bd8b61f4301b03fc9f06b97af4e8be17f..923f2110072d6f1f4a124824228c082d43f094d8 100644 (file)
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = ACCEPT,
 },
+{
+       "reference tracking: use ptr from bpf_tcp_sock() after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_sk_fullsock() after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_sk_fullsock(tp) after release",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_6, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use sk after bpf_sk_release(tp)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
+{
+       "reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, src_port)),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = ACCEPT,
+},
+{
+       "reference tracking: bpf_sk_release(listen_sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_get_listener_sock),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, offsetof(struct bpf_sock, type)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "reference has not been acquired before",
+},
+{
+       /* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
+       "reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)",
+       .insns = {
+       BPF_SK_LOOKUP,
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+       BPF_EXIT_INSN(),
+       BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+       BPF_EMIT_CALL(BPF_FUNC_sk_fullsock),
+       BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_tcp_sock),
+       BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
+       BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 3),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_8, offsetof(struct bpf_tcp_sock, snd_cwnd)),
+       BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+       BPF_EMIT_CALL(BPF_FUNC_sk_release),
+       BPF_EXIT_INSN(),
+       },
+       .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+       .result = REJECT,
+       .errstr = "invalid mem access",
+},
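
The raw instruction arrays above are easier to audit as the C they correspond to. A loose rendering of the first new case, "use ptr from bpf_tcp_sock() after release", which the verifier must reject with "invalid mem access" (the BPF_SK_LOOKUP macro in the test roughly corresponds to the bpf_sk_lookup_tcp() call below; the exact tuple sizing in the macro differs):

SEC("classifier")
int tp_after_release(struct __sk_buff *skb)
{
	struct bpf_sock_tuple tuple = {};
	struct bpf_sock *sk;
	struct bpf_tcp_sock *tp;

	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return 0;

	tp = bpf_tcp_sock(sk);
	if (!tp) {
		bpf_sk_release(sk);
		return 0;
	}

	bpf_sk_release(sk);
	return tp->snd_cwnd;	/* rejected: tp was derived from the released sk */
}
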
index 0ddfdf76aba5a56f387f9f43f567cc69009c672b..416436231fab011aeebfbed0f0f253ad25bef912 100644 (file)
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "type=sock_common expected=sock",
+       .errstr = "reference has not been acquired before",
 },
 {
        "bpf_sk_release(bpf_sk_fullsock(skb->sk))",
        },
        .prog_type = BPF_PROG_TYPE_SCHED_CLS,
        .result = REJECT,
-       .errstr = "type=tcp_sock expected=sock",
+       .errstr = "reference has not been acquired before",
 },
index 5970cee6d05f26fd9be36a5b1385d62083ab3cfe..b074ea9b6fe864b25720729ff1a8c18ef03dedfd 100644 (file)
         "teardown": [
             "$TC action flush action bpf"
         ]
+    },
+    {
+        "id": "b8a1",
+        "name": "Replace bpf action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "bpf"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action bpf",
+                0,
+                1,
+                255
+            ],
+            "$TC action add action bpf bytecode '1,6 0 0 4294967295' pass index 90"
+        ],
+        "cmdUnderTest": "$TC action replace action bpf bytecode '1,6 0 0 4294967295' goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC action list action bpf",
+        "matchPattern": "action order [0-9]*: bpf.* default-action pass.*index 90",
+        "matchCount": "1",
+        "teardown": [
+            "$TC action flush action bpf"
+        ]
     }
 ]
index 13147a1f5731444abc28bd53950b3702c71835a5..cadde8f41fcd3db8cdaa21afd117cda720802b05 100644 (file)
         "teardown": [
             "$TC actions flush action connmark"
         ]
+    },
+    {
+        "id": "c506",
+        "name": "Replace connmark with invalid goto chain control",
+        "category": [
+            "actions",
+            "connmark"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action connmark",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action connmark pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action connmark goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action connmark index 90",
+        "matchPattern": "action order [0-9]+: connmark zone 0 pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action connmark"
+        ]
     }
 ]
index a022792d392a9c93bf4c33d049ea7c817b85eded..ddabb2fbb7c72b4fa49787636c0b3502fb807aeb 100644 (file)
         "matchPattern": "^[ \t]+index [0-9]+ ref",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "d128",
+        "name": "Replace csum action with invalid goto chain control",
+        "category": [
+            "actions",
+            "csum"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action csum",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action csum iph index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action csum iph goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action csum index 90",
+        "matchPattern": "action order [0-9]*: csum \\(iph\\) action pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action csum"
+        ]
     }
 ]
index 89189a03ce3d431b817fcbdaef4eb3a89219fcf9..814b7a8a478be8c9283c8d9426b829d00b9350a4 100644 (file)
         "teardown": [
             "$TC actions flush action gact"
         ]
+    },
+    {
+        "id": "ca89",
+        "name": "Replace gact action with invalid goto chain control",
+        "category": [
+            "actions",
+            "gact"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action gact",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action pass random determ drop 2 index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action goto chain 42 random determ drop 5 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action gact",
+        "matchPattern": "action order [0-9]*: gact action pass.*random type determ drop val 2.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action gact"
+        ]
     }
 ]
index 0da3545cabdb6239190c8e916d28e25a882b78d2..c13a68b98fc775086d2087205ccefeb5304a4e7a 100644 (file)
         "matchPattern": "action order [0-9]*: ife encode action pipe.*allow prio.*index 4",
         "matchCount": "0",
         "teardown": []
+    },
+    {
+        "id": "a0e2",
+        "name": "Replace ife encode action with invalid goto chain control",
+        "category": [
+            "actions",
+            "ife"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action ife",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action ife encode allow mark pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action ife encode allow mark goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action ife index 90",
+        "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E .*allow mark.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action ife"
+        ]
     }
 ]
index db49fd0f84459fdfa844bfea2fceb8723ad7b1d1..6e5fb3d256811a9c606505072d2cd1e2db085088 100644 (file)
         "teardown": [
             "$TC actions flush action mirred"
         ]
+    },
+    {
+        "id": "2a9a",
+        "name": "Replace mirred action with invalid goto chain control",
+        "category": [
+            "actions",
+            "mirred"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action mirred",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action mirred ingress mirror dev lo drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action mirred ingress mirror dev lo goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action mirred index 90",
+        "matchPattern": "action order [0-9]*: mirred \\(Ingress Mirror to device lo\\) drop.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action mirred"
+        ]
     }
 ]
index 0080dc2fd41c4542ac21f5f655a80c4c6225644a..bc12c1ccad30e9660c5ab19abfc8064aece57f05 100644 (file)
         "teardown": [
             "$TC actions flush action nat"
         ]
+    },
+    {
+        "id": "4b12",
+        "name": "Replace nat action with invalid goto chain control",
+        "category": [
+            "actions",
+            "nat"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action nat",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action nat ingress 1.18.1.1 1.18.2.2 drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action nat ingress 1.18.1.1 1.18.2.2 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action nat index 90",
+        "matchPattern": "action order [0-9]+:  nat ingress 1.18.1.1/32 1.18.2.2 drop.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action nat"
+        ]
     }
 ]
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json b/tools/testing/selftests/tc-testing/tc-tests/actions/pedit.json
new file mode 100644 (file)
index 0000000..b73ceb9
--- /dev/null
@@ -0,0 +1,51 @@
+[
+    {
+        "id": "319a",
+        "name": "Add pedit action that mangles IP TTL",
+        "category": [
+            "actions",
+            "pedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action pedit",
+                0,
+                1,
+                255
+            ]
+        ],
+        "cmdUnderTest": "$TC actions add action pedit ex munge ip ttl set 10",
+        "expExitCode": "0",
+        "verifyCmd": "$TC actions ls action pedit",
+        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*index 1 ref.*key #0  at ipv4\\+8: val 0a000000 mask 00ffffff",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action pedit"
+        ]
+    },
+    {
+        "id": "7e67",
+        "name": "Replace pedit action with invalid goto chain",
+        "category": [
+            "actions",
+            "pedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action pedit",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action pedit ex munge ip ttl set 10 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action pedit ex munge ip ttl set 10 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action pedit",
+        "matchPattern": "action order [0-9]+:  pedit action pass keys 1.*index 90 ref.*key #0  at ipv4\\+8: val 0a000000 mask 00ffffff",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action pedit"
+        ]
+    }
+]
index 4086a50a670ecba9cc46cb9872061e9151a24e42..b8268da5adaaa77a1cf4a3a7092b6beb4fffeced 100644 (file)
         "teardown": [
             "$TC actions flush action police"
         ]
+    },
+    {
+        "id": "689e",
+        "name": "Replace police action with invalid goto chain control",
+        "category": [
+            "actions",
+            "police"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action police",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action police rate 3mbit burst 250k drop index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action police rate 3mbit burst 250k goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action police index 90",
+        "matchPattern": "action order [0-9]*:  police 0x5a rate 3Mbit burst 250Kb mtu 2Kb action drop",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action police"
+        ]
     }
 ]
index 3aca33c00039615eb4687fbabf64c75b7e6bb388..27f0acaed880e765e9829306b1cc3a28e2755cbb 100644 (file)
         "teardown": [
             "$TC actions flush action sample"
         ]
+    },
+    {
+        "id": "0a6e",
+        "name": "Replace sample action with invalid goto chain control",
+        "category": [
+            "actions",
+            "sample"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action sample",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action sample rate 1024 group 4 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action sample rate 1024 group 7 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action sample",
+        "matchPattern": "action order [0-9]+: sample rate 1/1024 group 4 pass.*index 90",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action sample"
+        ]
     }
 ]
index e89a7aa4012d1664ef5a3f1d7263aaf70b3bf785..8e8c1ae12260877fea635022a4d3a194a8f3c65c 100644 (file)
         "teardown": [
             ""
         ]
+    },
+    {
+        "id": "b776",
+        "name": "Replace simple action with invalid goto chain control",
+        "category": [
+            "actions",
+            "simple"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action simple",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action simple sdata \"hello\" pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action simple sdata \"world\" goto chain 42 index  90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action simple",
+        "matchPattern": "action order [0-9]*: Simple <hello>.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action simple"
+        ]
     }
 ]
index 5aaf593b914a3646d8e62922b6dba210724fa8d9..ecd96eda7f6a1044996b2afaa37e325008acfa0e 100644 (file)
         "teardown": [
             "$TC actions flush action skbedit"
         ]
+    },
+    {
+        "id": "1b2b",
+        "name": "Replace skbedit action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "skbedit"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbedit",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action skbedit ptype host pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action skbedit ptype host goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions list action skbedit",
+        "matchPattern": "action order [0-9]*: skbedit  ptype host pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbedit"
+        ]
     }
 ]
index fe3326e939c1b11bc008b46f452c336218906460..6eb4c4f97060fd3116a77537d8566f9e66d14060 100644 (file)
         "teardown": [
             "$TC actions flush action skbmod"
         ]
+    },
+    {
+        "id": "b651",
+        "name": "Replace skbmod action with invalid goto_chain control",
+        "category": [
+            "actions",
+            "skbmod"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action skbmod",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action skbmod set etype 0x1111 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action skbmod set etype 0x1111 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions ls action skbmod",
+        "matchPattern": "action order [0-9]*: skbmod pass set etype 0x1111\\s+index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action skbmod"
+        ]
     }
 ]
index e7e15a7336b6dfd1516e0276343f4afa735b41dd..28453a445fdb7e0af074dd26daaff90e8d8a120b 100644 (file)
         "teardown": [
            "$TC actions flush action tunnel_key"
        ]
+    },
+    {
+        "id": "8242",
+        "name": "Replace tunnel_key set action with invalid goto chain",
+        "category": [
+            "actions",
+            "tunnel_key"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action tunnel_key",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2 dst_port 3128 nocsum id 1 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action tunnel_key set src_ip 10.10.10.2 dst_ip 20.20.20.1 dst_port 3129 id 2 csum goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action tunnel_key index 90",
+        "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2.*key_id 1.*dst_port 3128.*csum pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action tunnel_key"
+        ]
     }
 ]
index 69ea09eefffc27290b5c16b084f8ff3798c184e8..cc7c7d75800809115bc22d62dbcd4c646a7f35d3 100644 (file)
         "teardown": [
             "$TC actions flush action vlan"
         ]
+    },
+    {
+        "id": "e394",
+        "name": "Replace vlan push action with invalid goto chain control",
+        "category": [
+            "actions",
+            "vlan"
+        ],
+        "setup": [
+            [
+                "$TC actions flush action vlan",
+                0,
+                1,
+                255
+            ],
+            "$TC actions add action vlan push id 500 pass index 90"
+        ],
+        "cmdUnderTest": "$TC actions replace action vlan push id 500 goto chain 42 index 90 cookie c1a0c1a0",
+        "expExitCode": "255",
+        "verifyCmd": "$TC actions get action vlan index 90",
+        "matchPattern": "action order [0-9]+: vlan.*push id 500 protocol 802.1Q priority 0 pass.*index 90 ref",
+        "matchCount": "1",
+        "teardown": [
+            "$TC actions flush action vlan"
+        ]
     }
 ]