Merge tag 'for-linus-6.9a-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 3 May 2024 19:10:41 +0000 (12:10 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 3 May 2024 19:10:41 +0000 (12:10 -0700)
Pull xen fixes from Juergen Gross:
 "Two fixes when running as Xen PV guests for issues introduced in the
  6.9 merge window, both related to apic id handling"

* tag 'for-linus-6.9a-rc7-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  x86/xen: return a sane initial apic id when running as PV guest
  x86/xen/smp_pv: Register the boot CPU APIC properly

190 files changed:
.mailmap
Documentation/core-api/workqueue.rst
Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
Documentation/devicetree/bindings/sound/rt5645.txt
Documentation/translations/zh_CN/core-api/workqueue.rst
MAINTAINERS
arch/arm/net/bpf_jit_32.c
arch/arm64/kvm/vgic/vgic-kvm-device.c
arch/arm64/net/bpf_jit_comp.c
arch/riscv/net/bpf_jit_comp64.c
arch/s390/crypto/paes_s390.c
arch/s390/include/asm/dwarf.h
arch/s390/kernel/vdso64/vdso_user_wrapper.S
arch/s390/mm/gmap.c
arch/s390/mm/hugetlbpage.c
arch/x86/net/bpf_jit_comp.c
arch/xtensa/include/asm/cacheflush.h
arch/xtensa/include/asm/processor.h
arch/xtensa/include/asm/ptrace.h
arch/xtensa/kernel/process.c
arch/xtensa/kernel/stacktrace.c
arch/xtensa/platforms/iss/console.c
drivers/base/regmap/regmap.c
drivers/block/ublk_drv.c
drivers/firewire/nosy.c
drivers/firewire/ohci.c
drivers/firmware/efi/unaccepted_memory.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
drivers/gpu/drm/imagination/pvr_fw_mips.h
drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
drivers/gpu/drm/nouveau/nvkm/core/firmware.c
drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/panel-ilitek-ili9341.c
drivers/gpu/drm/ttm/ttm_tt.c
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
drivers/gpu/drm/xe/xe_vm.c
drivers/net/dsa/mv88e6xxx/chip.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/brocade/bna/bnad_debugfs.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/intel/e1000e/phy.c
drivers/net/ethernet/intel/ice/ice_debugfs.c
drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
drivers/net/ethernet/qlogic/qede/qede_filter.c
drivers/net/vxlan/vxlan_core.c
drivers/nvme/host/core.c
drivers/nvme/host/multipath.c
drivers/nvme/host/nvme.h
drivers/nvme/host/pci.c
drivers/nvme/host/tcp.c
drivers/nvme/target/auth.c
drivers/nvme/target/configfs.c
drivers/nvme/target/core.c
drivers/nvme/target/nvmet.h
drivers/nvme/target/tcp.c
drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c
drivers/pinctrl/core.c
drivers/pinctrl/devicetree.c
drivers/pinctrl/intel/pinctrl-baytrail.c
drivers/pinctrl/intel/pinctrl-intel.h
drivers/pinctrl/mediatek/pinctrl-paris.c
drivers/pinctrl/meson/pinctrl-meson-a1.c
drivers/pinctrl/renesas/pinctrl-rzg2l.c
drivers/platform/x86/intel/speed_select_if/isst_if_common.c
drivers/power/supply/mt6360_charger.c
drivers/power/supply/rt9455_charger.c
drivers/regulator/irq_helpers.c
drivers/regulator/mt6360-regulator.c
drivers/regulator/qcom-refgen-regulator.c
drivers/regulator/vqmmc-ipq4019-regulator.c
drivers/s390/char/raw3270.c
drivers/s390/cio/cio_inject.c
drivers/s390/crypto/zcrypt_ccamisc.c
drivers/s390/crypto/zcrypt_ep11misc.c
drivers/s390/net/qeth_core_main.c
drivers/scsi/sd.c
drivers/spi/spi-axi-spi-engine.c
drivers/spi/spi-hisi-kunpeng.c
drivers/spi/spi.c
drivers/thermal/thermal_debugfs.c
fs/bcachefs/btree_node_scan.c
fs/bcachefs/btree_node_scan_types.h
fs/bcachefs/buckets.c
fs/bcachefs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/volumes.c
fs/erofs/fscache.c
fs/erofs/internal.h
fs/erofs/super.c
fs/nfs/inode.c
fs/nfsd/nfs4xdr.c
include/linux/filter.h
include/linux/regmap.h
include/linux/regulator/consumer.h
include/linux/skmsg.h
include/net/gro.h
include/sound/cs35l56.h
include/sound/emu10k1.h
kernel/bounds.c
kernel/bpf/core.c
kernel/bpf/verifier.c
kernel/workqueue.c
lib/Kconfig.debug
lib/scatterlist.c
net/8021q/vlan_core.c
net/bridge/br_forward.c
net/core/filter.c
net/core/gro.c
net/core/skbuff.c
net/core/skmsg.c
net/ipv4/af_inet.c
net/ipv4/ip_output.c
net/ipv4/raw.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/ip6_offload.c
net/ipv6/udp.c
net/ipv6/udp_offload.c
net/l2tp/l2tp_eth.c
net/mptcp/protocol.c
net/nsh/nsh.c
net/rxrpc/conn_object.c
net/rxrpc/insecure.c
net/rxrpc/rxkad.c
net/rxrpc/txbuf.c
net/sunrpc/xprtsock.c
net/tipc/msg.c
sound/hda/intel-dsp-config.c
sound/hda/intel-sdw-acpi.c
sound/pci/emu10k1/emu10k1.c
sound/pci/emu10k1/emu10k1_main.c
sound/pci/emu10k1/emumixer.c
sound/pci/emu10k1/emuproc.c
sound/pci/emu10k1/io.c
sound/pci/hda/cs35l56_hda.c
sound/pci/hda/patch_realtek.c
sound/soc/amd/yc/acp6x-mach.c
sound/soc/codecs/cs35l41.c
sound/soc/codecs/cs35l56-sdw.c
sound/soc/codecs/cs35l56-shared.c
sound/soc/codecs/cs35l56.c
sound/soc/codecs/da7219-aad.c
sound/soc/codecs/rt5645.c
sound/soc/codecs/rt715-sdca.c
sound/soc/codecs/rt715-sdw.c
sound/soc/codecs/rt722-sdca.c
sound/soc/codecs/rt722-sdca.h
sound/soc/codecs/wsa881x.c
sound/soc/intel/avs/icl.c
sound/soc/intel/avs/topology.c
sound/soc/intel/boards/bytcr_rt5640.c
sound/soc/meson/Kconfig
sound/soc/meson/axg-card.c
sound/soc/meson/axg-fifo.c
sound/soc/meson/axg-tdm-formatter.c
sound/soc/meson/axg-tdm-interface.c
sound/soc/meson/axg-tdm.h
sound/soc/sof/core.c
sound/soc/sof/debug.c
sound/soc/sof/intel/pci-lnl.c
sound/soc/sof/ipc3-pcm.c
sound/soc/sof/ipc4-pcm.c
sound/soc/sof/pcm.c
sound/soc/sof/sof-audio.h
sound/soc/tegra/tegra186_dspk.c
sound/soc/ti/davinci-mcasp.c
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
tools/testing/selftests/kvm/aarch64/vgic_init.c

index 16b704e1d5d3665d178f48992a8e3d03cad57cac..9f41b3906b663794ca27b77730d12cba68f05c35 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -512,6 +512,7 @@ Praveen BP <praveenbp@ti.com>
 Pradeep Kumar Chitrapu <quic_pradeepc@quicinc.com> <pradeepc@codeaurora.org>
 Prasad Sodagudi <quic_psodagud@quicinc.com> <psodagud@codeaurora.org>
 Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
+Puranjay Mohan <puranjay@kernel.org> <puranjay12@gmail.com>
 Qais Yousef <qyousef@layalina.io> <qais.yousef@imgtec.com>
 Qais Yousef <qyousef@layalina.io> <qais.yousef@arm.com>
 Quentin Monnet <qmo@kernel.org> <quentin.monnet@netronome.com>
index ed73c612174d4c99d3329cc1443b553537e049db..bcc370c876be9538c5020c6a14e8584d9090ebdf 100644 (file)
@@ -671,7 +671,7 @@ configuration, worker pools and how workqueues map to the pools: ::
   events_unbound           unbound  9  9 10 10  8
   events_freezable         percpu   0  2  4  6
   events_power_efficient   percpu   0  2  4  6
-  events_freezable_power_  percpu   0  2  4  6
+  events_freezable_pwr_ef  percpu   0  2  4  6
   rcu_gp                   percpu   0  2  4  6
   rcu_par_gp               percpu   0  2  4  6
   slub_flushwq             percpu   0  2  4  6
@@ -694,7 +694,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
   events_unbound              38306     0      0.1       -       7       -       -
   events_freezable                0     0      0.0       0       0       -       -
   events_power_efficient      29598     0      0.2       0       0       -       -
-  events_freezable_power_        10     0      0.0       0       0       -       -
+  events_freezable_pwr_ef        10     0      0.0       0       0       -       -
   sock_diag_events                0     0      0.0       0       0       -       -
 
                               total  infl  CPUtime  CPUhog CMW/RPR  mayday rescued
@@ -704,7 +704,7 @@ Use tools/workqueue/wq_monitor.py to monitor workqueue operations: ::
   events_unbound              38322     0      0.1       -       7       -       -
   events_freezable                0     0      0.0       0       0       -       -
   events_power_efficient      29603     0      0.2       0       0       -       -
-  events_freezable_power_        10     0      0.0       0       0       -       -
+  events_freezable_pwr_ef        10     0      0.0       0       0       -       -
   sock_diag_events                0     0      0.0       0       0       -       -
 
   ...
index d476de82e5c3f487b0275d1f2d8d8b5a79a6a116..4d5a957fa232eb55d0bd71ea887eefec35b60063 100644 (file)
@@ -120,7 +120,9 @@ additionalProperties:
         slew-rate: true
         gpio-hog: true
         gpios: true
+        input: true
         input-enable: true
+        output-enable: true
         output-high: true
         output-low: true
         line-name: true
index 41a62fd2ae1ffb4f2afaa58d204797a2b15e91ec..c1fa379f5f3ea1388ed9e54ea8fef66d4645d673 100644 (file)
@@ -20,6 +20,11 @@ Optional properties:
   a GPIO spec for the external headphone detect pin. If jd-mode = 0,
   we will get the JD status by getting the value of hp-detect-gpios.
 
+- cbj-sleeve-gpios:
+  a GPIO spec to control the external combo jack circuit to tie the sleeve/ring2
+  contacts to the ground or floating. It could avoid some electric noise from the
+  active speaker jacks.
+
 - realtek,in2-differential
   Boolean. Indicate MIC2 input are differential, rather than single-ended.
 
@@ -68,6 +73,7 @@ codec: rt5650@1a {
        compatible = "realtek,rt5650";
        reg = <0x1a>;
        hp-detect-gpios = <&gpio 19 0>;
+       cbj-sleeve-gpios = <&gpio 20 0>;
        interrupt-parent = <&gpio>;
        interrupts = <7 IRQ_TYPE_EDGE_FALLING>;
        realtek,dmic-en = "true";
index 7fac6f75d0782062f3f11c342068e9f13e90f2ad..fe0ff5a127f3f9255fe2606c15beb9380522cc4c 100644 (file)
@@ -7,12 +7,13 @@
 
  司延腾 Yanteng Si <siyanteng@loongson.cn>
  周彬彬 Binbin Zhou <zhoubinbin@loongson.cn>
+ 陈兴友 Xingyou Chen <rockrush@rockwork.org>
 
 .. _cn_workqueue.rst:
 
-=========================
-并发管理的工作队列 (cmwq)
-=========================
+========
+工作队列
+========
 
 :日期: September, 2010
 :作者: Tejun Heo <tj@kernel.org>
@@ -22,7 +23,7 @@
 简介
 ====
 
-在很多情况下,需要一个异步进程的执行环境,工作队列(wq)API是这种情况下
+在很多情况下,需要一个异步的程序执行环境,工作队列(wq)API是这种情况下
 最常用的机制。
 
 当需要这样一个异步执行上下文时,一个描述将要执行的函数的工作项(work,
@@ -34,8 +35,8 @@
 队列时,工作者又开始执行。
 
 
-为什么要cmwq?
-=============
+为什么要有并发管理工作队列?
+===========================
 
 在最初的wq实现中,多线程(MT)wq在每个CPU上有一个工作者线程,而单线程
 (ST)wq在全系统有一个工作者线程。一个MT wq需要保持与CPU数量相同的工
 向该函数的工作项,并在工作队列中排队等待该工作项。(就是挂到workqueue
 队列里面去)
 
-特定目的线程,称为工作线程(工作者),一个接一个地执行队列中的功能。
-如果没有工作项排队,工作者线程就会闲置。这些工作者线程被管理在所谓
-的工作者池中。
+工作项可以在线程或BH(软中断)上下文中执行。
+
+对于由线程执行的工作队列,被称为(内核)工作者([k]worker)的特殊
+线程会依次执行其中的函数。如果没有工作项排队,工作者线程就会闲置。
+这些工作者线程被管理在所谓的工作者池中。
 
 cmwq设计区分了面向用户的工作队列,子系统和驱动程序在上面排队工作,
 以及管理工作者池和处理排队工作项的后端机制。
@@ -84,6 +87,10 @@ cmwq设计区分了面向用户的工作队列,子系统和驱动程序在上
 优先级的工作项,还有一些额外的工作者池,用于服务未绑定工作队列的工
 作项目——这些后备池的数量是动态的。
 
+BH工作队列使用相同的结构。然而,由于同一时间只可能有一个执行上下文,
+不需要担心并发问题。每个CPU上的BH工作者池只包含一个用于表示BH执行
+上下文的虚拟工作者。BH工作队列可以被看作软中断的便捷接口。
+
 当他们认为合适的时候,子系统和驱动程序可以通过特殊的
 ``workqueue API`` 函数创建和排队工作项。他们可以通过在工作队列上
 设置标志来影响工作项执行方式的某些方面,他们把工作项放在那里。这些
@@ -95,9 +102,9 @@ cmwq设计区分了面向用户的工作队列,子系统和驱动程序在上
 否则一个绑定的工作队列的工作项将被排在与发起线程运行的CPU相关的普
 通或高级工作工作者池的工作项列表中。
 
-对于任何工作者池的实施,管理并发水平(有多少执行上下文处于活动状
-态)是一个重要问题。最低水平是为了节省资源,而饱和水平是指系统被
-充分使用。
+对于任何线程池的实施,管理并发水平(有多少执行上下文处于活动状
+态)是一个重要问题。cmwq试图将并发保持在一个尽可能低且充足的
+水平。最低水平是为了节省资源,而充足是为了使系统能被充分使用。
 
 每个与实际CPU绑定的worker-pool通过钩住调度器来实现并发管理。每当
 一个活动的工作者被唤醒或睡眠时,工作者池就会得到通知,并跟踪当前可
@@ -140,6 +147,17 @@ workqueue将自动创建与属性相匹配的后备工作者池。调节并发
 ``flags``
 ---------
 
+``WQ_BH``
+  BH工作队列可以被看作软中断的便捷接口。它总是每个CPU一份,
+  其中的各个工作项也会按在队列中的顺序,被所属CPU在软中断
+  上下文中执行。
+
+  BH工作队列的 ``max_active`` 值必须为0,且只能单独或和
+  ``WQ_HIGHPRI`` 标志组合使用。
+
+  BH工作项不可以睡眠。像延迟排队、冲洗、取消等所有其他特性
+  都是支持的。
+
 ``WQ_UNBOUND``
   排队到非绑定wq的工作项由特殊的工作者池提供服务,这些工作者不
   绑定在任何特定的CPU上。这使得wq表现得像一个简单的执行环境提
@@ -184,25 +202,21 @@ workqueue将自动创建与属性相匹配的后备工作者池。调节并发
 --------------
 
 ``@max_active`` 决定了每个CPU可以分配给wq的工作项的最大执行上
-下文数量。例如,如果 ``@max_active为16`` ,每个CPU最多可以同
-时执行16个wq的工作项。
+下文数量。例如,如果 ``@max_active`` 为16 ,每个CPU最多可以同
+时执行16个wq的工作项。它总是每CPU属性,即便对于未绑定 wq。
 
-目前,对于一个绑定的wq, ``@max_active`` 的最大限制是512,当指
-定为0时使用的默认值是256。对于非绑定的wq,其限制是512和
-4 * ``num_possible_cpus()`` 中的较高值。这些值被选得足够高,所
-以它们不是限制性因素,同时会在失控情况下提供保护。
+``@max_active`` 的最大限制是512,当指定为0时使用的默认值是256。
+这些值被选得足够高,所以它们不是限制性因素,同时会在失控情况下提供
+保护。
 
 一个wq的活动工作项的数量通常由wq的用户来调节,更具体地说,是由用
 户在同一时间可以排列多少个工作项来调节。除非有特定的需求来控制活动
 工作项的数量,否则建议指定 为"0"。
 
-一些用户依赖于ST wq的严格执行顺序。 ``@max_active`` 为1和 ``WQ_UNBOUND``
-的组合用来实现这种行为。这种wq上的工作项目总是被排到未绑定的工作池
-中,并且在任何时候都只有一个工作项目处于活动状态,从而实现与ST wq相
-同的排序属性。
-
-在目前的实现中,上述配置只保证了特定NUMA节点内的ST行为。相反,
-``alloc_ordered_workqueue()`` 应该被用来实现全系统的ST行为。
+一些用户依赖于任意时刻最多只有一个工作项被执行,且各工作项被按队列中
+顺序处理带来的严格执行顺序。``@max_active`` 为1和 ``WQ_UNBOUND``
+的组合曾被用来实现这种行为,现在不用了。请使用
+``alloc_ordered_workqueue()`` 。
 
 
 执行场景示例
@@ -285,7 +299,7 @@ And with cmwq with ``@max_active`` >= 3, ::
 * 除非有特殊需要,建议使用0作为@max_active。在大多数使用情
   况下,并发水平通常保持在默认限制之下。
 
-* 一个wq作为前进进度保证（WQ_MEM_RECLAIM,冲洗(flush)和工
+* 一个wq作为前进进度保证，``WQ_MEM_RECLAIM`` ,冲洗(flush)和工
   作项属性的域。不涉及内存回收的工作项,不需要作为工作项组的一
   部分被刷新,也不需要任何特殊属性,可以使用系统中的一个wq。使
   用专用wq和系统wq在执行特性上没有区别。
@@ -294,6 +308,337 @@ And with cmwq with ``@max_active`` >= 3, ::
   益的,因为wq操作和工作项执行中的定位水平提高了。
 
 
+亲和性作用域
+============
+
+一个非绑定工作队列根据其亲和性作用域来对CPU进行分组以提高缓存
+局部性。比如如果一个工作队列使用默认的“cache”亲和性作用域,
+它将根据最后一级缓存的边界来分组处理器。这个工作队列上的工作项
+将被分配给一个与发起CPU共用最后级缓存的处理器上的工作者。根据
+``affinity_strict`` 的设置,工作者在启动后可能被允许移出
+所在作用域,也可能不被允许。
+
+工作队列目前支持以下亲和性作用域。
+
+``default``
+  使用模块参数 ``workqueue.default_affinity_scope`` 指定
+  的作用域,该参数总是会被设为以下作用域中的一个。
+
+``cpu``
+  CPU不被分组。一个CPU上发起的工作项会被同一CPU上的工作者执行。
+  这使非绑定工作队列表现得像是不含并发管理的每CPU工作队列。
+
+``smt``
+  CPU被按SMT边界分组。这通常意味着每个物理CPU核上的各逻辑CPU会
+  被分进同一组。
+
+``cache``
+  CPU被按缓存边界分组。采用哪个缓存边界由架构代码决定。很多情况
+  下会使用L3。这是默认的亲和性作用域。
+
+``numa``
+  CPU被按NUMA边界分组。
+
+``system``
+  所有CPU被放在同一组。工作队列不尝试在临近发起CPU的CPU上运行
+  工作项。
+
+默认的亲和性作用域可以被模块参数 ``workqueue.default_affinity_scope``
+修改,特定工作队列的亲和性作用域可以通过 ``apply_workqueue_attrs()``
+被更改。
+
+如果设置了 ``WQ_SYSFS`` ,工作队列会在它的 ``/sys/devices/virtual/workqueue/WQ_NAME/``
+目录中有以下亲和性作用域相关的接口文件。
+
+``affinity_scope``
+  读操作以查看当前的亲和性作用域。写操作用于更改设置。
+
+  当前作用域是默认值时,当前生效的作用域也可以被从这个文件中
+  读到(小括号内),例如 ``default (cache)`` 。
+
+``affinity_strict``
+  默认值0表明亲和性作用域不是严格的。当一个工作项开始执行时,
+  工作队列尽量尝试使工作者处于亲和性作用域内,称为遣返。启动后,
+  调度器可以自由地将工作者调度到系统中任意它认为合适的地方去。
+  这使得在保留使用其他CPU(如果必需且有可用)能力的同时,
+  还能从作用域局部性上获益。
+
+  如果设置为1,作用域内的所有工作者将被保证总是处于作用域内。
+  这在跨亲和性作用域会导致如功耗、负载隔离等方面的潜在影响时
+  会有用。严格的NUMA作用域也可用于和旧版内核中工作队列的行为
+  保持一致。
+
+
+亲和性作用域与性能
+==================
+
+如果非绑定工作队列的行为对绝大多数使用场景来说都是最优的,
+不需要更多调节,就完美了。很不幸,在当前内核中,重度使用
+工作队列时,需要在局部性和利用率间显式地作一个明显的权衡。
+
+更高的局部性带来更高效率,也就是相同数量的CPU周期内可以做
+更多工作。然而,如果发起者没能将工作项充分地分散在亲和性
+作用域间,更高的局部性也可能带来更低的整体系统利用率。以下
+dm-crypt 的性能测试清楚地阐明了这一取舍。
+
+测试运行在一个12核24线程、4个L3缓存的处理器(AMD Ryzen
+9 3900x)上。为保持一致性,关闭CPU超频。 ``/dev/dm-0``
+是NVME SSD(三星 990 PRO)上创建,用 ``cryptsetup``
+以默认配置打开的一个 dm-crypt 设备。
+
+
+场景 1: 机器上遍布着有充足的发起者和工作量
+------------------------------------------
+
+使用命令:::
+
+  $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k --ioengine=libaio \
+    --iodepth=64 --runtime=60 --numjobs=24 --time_based --group_reporting \
+    --name=iops-test-job --verify=sha512
+
+这里有24个发起者,每个同时发起64个IO。 ``--verify=sha512``
+使得 ``fio`` 每次生成和读回内容受发起者和 ``kcryptd``
+间的执行局部性影响。下面是基于不同 ``kcryptd`` 的亲和性
+作用域设置,各经过五次测试得到的读取带宽和CPU利用率数据。
+
+.. list-table::
+   :widths: 16 20 20
+   :header-rows: 1
+
+   * - 亲和性
+     - 带宽 (MiBps)
+     - CPU利用率(%)
+
+   * - system
+     - 1159.40 ±1.34
+     - 99.31 ±0.02
+
+   * - cache
+     - 1166.40 ±0.89
+     - 99.34 ±0.01
+
+   * - cache (strict)
+     - 1166.00 ±0.71
+     - 99.35 ±0.01
+
+在系统中分布着足够多发起者的情况下,不论严格与否,“cache”
+没有表现得更差。三种配置均使整个机器达到饱和,但由于提高了
+局部性,缓存相关的两种有0.6%的(带宽)提升。
+
+
+场景 2: 更少发起者,足以达到饱和的工作量
+----------------------------------------
+
+使用命令:::
+
+  $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+    --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=8 \
+    --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+与上一个场景唯一的区别是 ``--numjobs=8``。 发起者数量
+减少为三分之一,但仍然有足以使系统达到饱和的工作总量。
+
+.. list-table::
+   :widths: 16 20 20
+   :header-rows: 1
+
+   * - 亲和性
+     - 带宽 (MiBps)
+     - CPU利用率(%)
+
+   * - system
+     - 1155.40 ±0.89
+     - 97.41 ±0.05
+
+   * - cache
+     - 1154.40 ±1.14
+     - 96.15 ±0.09
+
+   * - cache (strict)
+     - 1112.00 ±4.64
+     - 93.26 ±0.35
+
+这里有超过使系统达到饱和所需的工作量。“system”和“cache”
+都接近但并未使机器完全饱和。“cache”消耗更少的CPU但更高的
+效率使其得到和“system”相同的带宽。
+
+八个发起者盘桓在四个L3缓存作用域间仍然允许“cache (strict)”
+几乎使机器饱和,但缺少对工作的保持(不移到空闲处理器上)
+开始带来3.7%的带宽损失。
+
+
+场景 3: 更少发起者,不充足的工作量
+----------------------------------
+
+使用命令:::
+
+  $ fio --filename=/dev/dm-0 --direct=1 --rw=randrw --bs=32k \
+    --ioengine=libaio --iodepth=64 --runtime=60 --numjobs=4 \
+    --time_based --group_reporting --name=iops-test-job --verify=sha512
+
+再次,唯一的区别是 ``--numjobs=4``。由于发起者减少到四个,
+现在没有足以使系统饱和的工作量,带宽变得依赖于完成时延。
+
+.. list-table::
+   :widths: 16 20 20
+   :header-rows: 1
+
+   * - 亲和性
+     - 带宽 (MiBps)
+     - CPU利用率(%)
+
+   * - system
+     - 993.60 ±1.82
+     - 75.49 ±0.06
+
+   * - cache
+     - 973.40 ±1.52
+     - 74.90 ±0.07
+
+   * - cache (strict)
+     - 828.20 ±4.49
+     - 66.84 ±0.29
+
+现在,局部性和利用率间的权衡更清晰了。“cache”展示出相比
+“system”2%的带宽损失,而“cache (strict)”跌到20%。
+
+
+结论和建议
+----------
+
+在以上试验中,虽然一致并且也明显,但“cache”亲和性作用域
+相比“system”的性能优势并不大。然而,这影响是依赖于作用域
+间距离的,在更复杂的处理器拓扑下可能有更明显的影响。
+
+虽然这些情形下缺少工作保持是有坏处的,但比“cache (strict)”
+好多了,而且最大化工作队列利用率的需求也并不常见。因此,
+“cache”是非绑定池的默认亲和性作用域。
+
+* 由于不存在一个适用于大多数场景的选择,对于可能需要消耗
+  大量CPU的工作队列,建议通过 ``apply_workqueue_attrs()``
+  进行(专门)配置,并考虑是否启用 ``WQ_SYSFS``。
+
+* 设置了严格“cpu”亲和性作用域的非绑定工作队列,它的行为与
+  ``WQ_CPU_INTENSIVE`` 每CPU工作队列一样。后者没有真正
+  优势,而前者提供了大幅度的灵活性。
+
+* 亲和性作用域是从Linux v6.5起引入的。为了模拟旧版行为,
+  可以使用严格的“numa”亲和性作用域。
+
+* 不严格的亲和性作用域中,缺少工作保持大概缘于调度器。内核
+  为什么没能维护好大多数场景下的工作保持,把事情作对,还没有
+  理论上的解释。因此,未来调度器的改进可能会使我们不再需要
+  这些调节项。
+
+
+检查配置
+========
+
+使用 tools/workqueue/wq_dump.py(drgn脚本) 来检查未
+绑定CPU的亲和性配置,工作者池,以及工作队列如何映射到池上: ::
+
+  $ tools/workqueue/wq_dump.py
+  Affinity Scopes
+  ===============
+  wq_unbound_cpumask=0000000f
+
+  CPU
+    nr_pods  4
+    pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+    pod_node [0]=0 [1]=0 [2]=1 [3]=1
+    cpu_pod  [0]=0 [1]=1 [2]=2 [3]=3
+
+  SMT
+    nr_pods  4
+    pod_cpus [0]=00000001 [1]=00000002 [2]=00000004 [3]=00000008
+    pod_node [0]=0 [1]=0 [2]=1 [3]=1
+    cpu_pod  [0]=0 [1]=1 [2]=2 [3]=3
+
+  CACHE (default)
+    nr_pods  2
+    pod_cpus [0]=00000003 [1]=0000000c
+    pod_node [0]=0 [1]=1
+    cpu_pod  [0]=0 [1]=0 [2]=1 [3]=1
+
+  NUMA
+    nr_pods  2
+    pod_cpus [0]=00000003 [1]=0000000c
+    pod_node [0]=0 [1]=1
+    cpu_pod  [0]=0 [1]=0 [2]=1 [3]=1
+
+  SYSTEM
+    nr_pods  1
+    pod_cpus [0]=0000000f
+    pod_node [0]=-1
+    cpu_pod  [0]=0 [1]=0 [2]=0 [3]=0
+
+  Worker Pools
+  ============
+  pool[00] ref= 1 nice=  0 idle/workers=  4/  4 cpu=  0
+  pool[01] ref= 1 nice=-20 idle/workers=  2/  2 cpu=  0
+  pool[02] ref= 1 nice=  0 idle/workers=  4/  4 cpu=  1
+  pool[03] ref= 1 nice=-20 idle/workers=  2/  2 cpu=  1
+  pool[04] ref= 1 nice=  0 idle/workers=  4/  4 cpu=  2
+  pool[05] ref= 1 nice=-20 idle/workers=  2/  2 cpu=  2
+  pool[06] ref= 1 nice=  0 idle/workers=  3/  3 cpu=  3
+  pool[07] ref= 1 nice=-20 idle/workers=  2/  2 cpu=  3
+  pool[08] ref=42 nice=  0 idle/workers=  6/  6 cpus=0000000f
+  pool[09] ref=28 nice=  0 idle/workers=  3/  3 cpus=00000003
+  pool[10] ref=28 nice=  0 idle/workers= 17/ 17 cpus=0000000c
+  pool[11] ref= 1 nice=-20 idle/workers=  1/  1 cpus=0000000f
+  pool[12] ref= 2 nice=-20 idle/workers=  1/  1 cpus=00000003
+  pool[13] ref= 2 nice=-20 idle/workers=  1/  1 cpus=0000000c
+
+  Workqueue CPU -> pool
+  =====================
+  [    workqueue \ CPU              0  1  2  3 dfl]
+  events                   percpu   0  2  4  6
+  events_highpri           percpu   1  3  5  7
+  events_long              percpu   0  2  4  6
+  events_unbound           unbound  9  9 10 10  8
+  events_freezable         percpu   0  2  4  6
+  events_power_efficient   percpu   0  2  4  6
+  events_freezable_power_  percpu   0  2  4  6
+  rcu_gp                   percpu   0  2  4  6
+  rcu_par_gp               percpu   0  2  4  6
+  slub_flushwq             percpu   0  2  4  6
+  netns                    ordered  8  8  8  8  8
+  ...
+
+参见命令的帮助消息以获取更多信息。
+
+
+监视
+====
+
+使用 tools/workqueue/wq_monitor.py 来监视工作队列的运行: ::
+
+  $ tools/workqueue/wq_monitor.py events
+                              total  infl  CPUtime  CPUhog CMW/RPR  mayday rescued
+  events                      18545     0      6.1       0       5       -       -
+  events_highpri                  8     0      0.0       0       0       -       -
+  events_long                     3     0      0.0       0       0       -       -
+  events_unbound              38306     0      0.1       -       7       -       -
+  events_freezable                0     0      0.0       0       0       -       -
+  events_power_efficient      29598     0      0.2       0       0       -       -
+  events_freezable_power_        10     0      0.0       0       0       -       -
+  sock_diag_events                0     0      0.0       0       0       -       -
+
+                              total  infl  CPUtime  CPUhog CMW/RPR  mayday rescued
+  events                      18548     0      6.1       0       5       -       -
+  events_highpri                  8     0      0.0       0       0       -       -
+  events_long                     3     0      0.0       0       0       -       -
+  events_unbound              38322     0      0.1       -       7       -       -
+  events_freezable                0     0      0.0       0       0       -       -
+  events_power_efficient      29603     0      0.2       0       0       -       -
+  events_freezable_power_        10     0      0.0       0       0       -       -
+  sock_diag_events                0     0      0.0       0       0       -       -
+
+  ...
+
+参见命令的帮助消息以获取更多信息。
+
+
 调试
 ====
 
@@ -330,7 +675,6 @@ And with cmwq with ``@max_active`` >= 3, ::
 
 工作队列保证,如果在工作项排队后满足以下条件,则工作项不能重入:
 
-
         1. 工作函数没有被改变。
         2. 没有人将该工作项排到另一个工作队列中。
         3. 该工作项尚未被重新启动。
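The documentation hunks above describe the new BH workqueues (``WQ_BH``) and redirect users who need strict one-at-a-time ordering to alloc_ordered_workqueue(). A minimal sketch of using both from driver code (a made-up module, not part of this series; all names are illustrative):

  #include <linux/module.h>
  #include <linux/workqueue.h>

  static struct workqueue_struct *demo_ordered_wq;
  static struct workqueue_struct *demo_bh_wq;

  static void demo_ordered_fn(struct work_struct *work)
  {
          pr_info("ordered work ran\n");      /* may sleep here */
  }

  static void demo_bh_fn(struct work_struct *work)
  {
          pr_info("bh work ran\n");           /* softirq context: must not sleep */
  }

  static DECLARE_WORK(demo_ordered_work, demo_ordered_fn);
  static DECLARE_WORK(demo_bh_work, demo_bh_fn);

  static int __init demo_init(void)
  {
          /* At most one item active at any time, executed in queueing order. */
          demo_ordered_wq = alloc_ordered_workqueue("demo_ordered", 0);
          if (!demo_ordered_wq)
                  return -ENOMEM;

          /* BH workqueue: max_active must be 0; items run on the queueing CPU. */
          demo_bh_wq = alloc_workqueue("demo_bh", WQ_BH, 0);
          if (!demo_bh_wq) {
                  destroy_workqueue(demo_ordered_wq);
                  return -ENOMEM;
          }

          queue_work(demo_ordered_wq, &demo_ordered_work);
          queue_work(demo_bh_wq, &demo_bh_work);
          return 0;
  }

  static void __exit demo_exit(void)
  {
          destroy_workqueue(demo_bh_wq);
          destroy_workqueue(demo_ordered_wq);
  }

  module_init(demo_init);
  module_exit(demo_exit);
  MODULE_LICENSE("GPL");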
index f6dc90559341f5aa3debd8592f0da973624238e7..ec0284125e8f706cf4a6f6da241493ce8150e5fe 100644 (file)
@@ -553,7 +553,7 @@ F:  Documentation/devicetree/bindings/iio/accel/adi,adxl345.yaml
 F:     drivers/input/misc/adxl34x.c
 
 ADXL355 THREE-AXIS DIGITAL ACCELEROMETER DRIVER
-M:     Puranjay Mohan <puranjay12@gmail.com>
+M:     Puranjay Mohan <puranjay@kernel.org>
 L:     linux-iio@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/iio/accel/adi,adxl355.yaml
@@ -3714,7 +3714,7 @@ F:        drivers/iio/imu/bmi323/
 
 BPF JIT for ARM
 M:     Russell King <linux@armlinux.org.uk>
-M:     Puranjay Mohan <puranjay12@gmail.com>
+M:     Puranjay Mohan <puranjay@kernel.org>
 L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/arm/net/
@@ -3764,6 +3764,8 @@ X:        arch/riscv/net/bpf_jit_comp64.c
 
 BPF JIT for RISC-V (64-bit)
 M:     Björn Töpel <bjorn@kernel.org>
+R:     Pu Lehui <pulehui@huawei.com>
+R:     Puranjay Mohan <puranjay@kernel.org>
 L:     bpf@vger.kernel.org
 S:     Maintained
 F:     arch/riscv/net/
@@ -4191,7 +4193,6 @@ S:        Supported
 F:     drivers/scsi/bnx2i/
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-M:     Ariel Elior <aelior@marvell.com>
 M:     Sudarsana Kalluru <skalluru@marvell.com>
 M:     Manish Chopra <manishc@marvell.com>
 L:     netdev@vger.kernel.org
@@ -15160,9 +15161,8 @@ F:      drivers/scsi/myrb.*
 F:     drivers/scsi/myrs.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:     Chris Lee <christopher.lee@cspi.com>
 L:     netdev@vger.kernel.org
-S:     Supported
+S:     Orphan
 W:     https://www.cspi.com/ethernet-products/support/downloads/
 F:     drivers/net/ethernet/myricom/myri10ge/
 
@@ -17990,7 +17990,6 @@ S:      Supported
 F:     drivers/scsi/qedi/
 
 QLOGIC QL4xxx ETHERNET DRIVER
-M:     Ariel Elior <aelior@marvell.com>
 M:     Manish Chopra <manishc@marvell.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -18000,7 +17999,6 @@ F:      include/linux/qed/
 
 QLOGIC QL4xxx RDMA DRIVER
 M:     Michal Kalderon <mkalderon@marvell.com>
-M:     Ariel Elior <aelior@marvell.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/qedr/
@@ -21918,7 +21916,7 @@ F:      include/linux/soc/ti/ti_sci_inta_msi.h
 F:     include/linux/soc/ti/ti_sci_protocol.h
 
 TEXAS INSTRUMENTS' TMP117 TEMPERATURE SENSOR DRIVER
-M:     Puranjay Mohan <puranjay12@gmail.com>
+M:     Puranjay Mohan <puranjay@kernel.org>
 L:     linux-iio@vger.kernel.org
 S:     Supported
 F:     Documentation/devicetree/bindings/iio/temperature/ti,tmp117.yaml
@@ -24459,6 +24457,14 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/har
 F:     Documentation/admin-guide/LSM/Yama.rst
 F:     security/yama/
 
+YAML NETLINK (YNL)
+M:     Donald Hunter <donald.hunter@gmail.com>
+M:     Jakub Kicinski <kuba@kernel.org>
+F:     Documentation/netlink/
+F:     Documentation/userspace-api/netlink/intro-specs.rst
+F:     Documentation/userspace-api/netlink/specs.rst
+F:     tools/net/ynl/
+
 YEALINK PHONE DRIVER
 M:     Henk Vergonet <Henk.Vergonet@gmail.com>
 L:     usbb2k-api-dev@nongnu.org
index 1d672457d02ff3fcdd849c57c9108f94a8563886..72b5cd697f5d94b742fb59b98913c975838de871 100644 (file)
@@ -871,16 +871,11 @@ static inline void emit_a32_alu_r64(const bool is64, const s8 dst[],
 }
 
 /* dst = src (4 bytes)*/
-static inline void emit_a32_mov_r(const s8 dst, const s8 src, const u8 off,
-                                 struct jit_ctx *ctx) {
+static inline void emit_a32_mov_r(const s8 dst, const s8 src, struct jit_ctx *ctx) {
        const s8 *tmp = bpf2a32[TMP_REG_1];
        s8 rt;
 
        rt = arm_bpf_get_reg32(src, tmp[0], ctx);
-       if (off && off != 32) {
-               emit(ARM_LSL_I(rt, rt, 32 - off), ctx);
-               emit(ARM_ASR_I(rt, rt, 32 - off), ctx);
-       }
        arm_bpf_put_reg32(dst, rt, ctx);
 }
 
@@ -889,15 +884,15 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
                                  const s8 src[],
                                  struct jit_ctx *ctx) {
        if (!is64) {
-               emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
+               emit_a32_mov_r(dst_lo, src_lo, ctx);
                if (!ctx->prog->aux->verifier_zext)
                        /* Zero out high 4 bytes */
                        emit_a32_mov_i(dst_hi, 0, ctx);
        } else if (__LINUX_ARM_ARCH__ < 6 &&
                   ctx->cpu_architecture < CPU_ARCH_ARMv5TE) {
                /* complete 8 byte move */
-               emit_a32_mov_r(dst_lo, src_lo, 0, ctx);
-               emit_a32_mov_r(dst_hi, src_hi, 0, ctx);
+               emit_a32_mov_r(dst_lo, src_lo, ctx);
+               emit_a32_mov_r(dst_hi, src_hi, ctx);
        } else if (is_stacked(src_lo) && is_stacked(dst_lo)) {
                const u8 *tmp = bpf2a32[TMP_REG_1];
 
@@ -917,17 +912,52 @@ static inline void emit_a32_mov_r64(const bool is64, const s8 dst[],
 static inline void emit_a32_movsx_r64(const bool is64, const u8 off, const s8 dst[], const s8 src[],
                                      struct jit_ctx *ctx) {
        const s8 *tmp = bpf2a32[TMP_REG_1];
-       const s8 *rt;
+       s8 rs;
+       s8 rd;
 
-       rt = arm_bpf_get_reg64(dst, tmp, ctx);
+       if (is_stacked(dst_lo))
+               rd = tmp[1];
+       else
+               rd = dst_lo;
+       rs = arm_bpf_get_reg32(src_lo, rd, ctx);
+       /* rs may be one of src[1], dst[1], or tmp[1] */
+
+       /* Sign extend rs if needed. If off == 32, lower 32-bits of src are moved to dst and sign
+        * extension only happens in the upper 64 bits.
+        */
+       if (off != 32) {
+               /* Sign extend rs into rd */
+               emit(ARM_LSL_I(rd, rs, 32 - off), ctx);
+               emit(ARM_ASR_I(rd, rd, 32 - off), ctx);
+       } else {
+               rd = rs;
+       }
+
+       /* Write rd to dst_lo
+        *
+        * Optimization:
+        * Assume:
+        * 1. dst == src and stacked.
+        * 2. off == 32
+        *
+        * In this case src_lo was loaded into rd(tmp[1]) but rd was not sign extended as off==32.
+        * So, we don't need to write rd back to dst_lo as they have the same value.
+        * This saves us one str instruction.
+        */
+       if (dst_lo != src_lo || off != 32)
+               arm_bpf_put_reg32(dst_lo, rd, ctx);
 
-       emit_a32_mov_r(dst_lo, src_lo, off, ctx);
        if (!is64) {
                if (!ctx->prog->aux->verifier_zext)
                        /* Zero out high 4 bytes */
                        emit_a32_mov_i(dst_hi, 0, ctx);
        } else {
-               emit(ARM_ASR_I(rt[0], rt[1], 31), ctx);
+               if (is_stacked(dst_hi)) {
+                       emit(ARM_ASR_I(tmp[0], rd, 31), ctx);
+                       arm_bpf_put_reg32(dst_hi, tmp[0], ctx);
+               } else {
+                       emit(ARM_ASR_I(dst_hi, rd, 31), ctx);
+               }
        }
 }
 
index f48b8dab8b3d2fe175a02ddf3c81aa39dea5f16c..1d26bb5b02f4b592775dee6f964938f8ce4c866c 100644 (file)
@@ -338,12 +338,12 @@ int kvm_register_vgic_device(unsigned long type)
 int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
                       struct vgic_reg_attr *reg_attr)
 {
-       int cpuid;
+       int cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
 
-       cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr);
-
-       reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
        reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+       reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid);
+       if (!reg_attr->vcpu)
+               return -EINVAL;
 
        return 0;
 }
index 122021f9bdfc87c3c9634d6801edad7845b9f96e..13eac43c632dfe793912ba310f7449d59e3bd9ed 100644 (file)
@@ -1844,15 +1844,15 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
 
        emit_call(enter_prog, ctx);
 
+       /* save return value to callee saved register x20 */
+       emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
+
        /* if (__bpf_prog_enter(prog) == 0)
         *         goto skip_exec_of_prog;
         */
        branch = ctx->image + ctx->idx;
        emit(A64_NOP, ctx);
 
-       /* save return value to callee saved register x20 */
-       emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
-
        emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
        if (!p->jited)
                emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
index 1adf2f39ce59cbb691b7f89ae9fc7a5127642ca4..ec9d692838fca54ee445d10ab3d862130338cd3b 100644 (file)
@@ -722,6 +722,9 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
        if (ret)
                return ret;
 
+       /* store prog start time */
+       emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+
        /* if (__bpf_prog_enter(prog) == 0)
         *      goto skip_exec_of_prog;
         */
@@ -729,9 +732,6 @@ static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_of
        /* nop reserved for conditional jump */
        emit(rv_nop(), ctx);
 
-       /* store prog start time */
-       emit_mv(RV_REG_S1, RV_REG_A0, ctx);
-
        /* arg1: &args_off */
        emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
        if (!p->jited)
index 99f7e1f2b70aff61da33cc06bc0e7455394150a8..99ea3f12c5d2ab2990c06b73602a3c513f361f7f 100644 (file)
@@ -125,8 +125,19 @@ struct s390_pxts_ctx {
 static inline int __paes_keyblob2pkey(struct key_blob *kb,
                                     struct pkey_protkey *pk)
 {
-       return pkey_keyblob2pkey(kb->key, kb->keylen,
-                                pk->protkey, &pk->len, &pk->type);
+       int i, ret = -EIO;
+
+       /* try three times in case of busy card */
+       for (i = 0; ret && i < 3; i++) {
+               if (ret == -EBUSY && in_task()) {
+                       if (msleep_interruptible(1000))
+                               return -EINTR;
+               }
+               ret = pkey_keyblob2pkey(kb->key, kb->keylen,
+                                       pk->protkey, &pk->len, &pk->type);
+       }
+
+       return ret;
 }
 
 static inline int __paes_convert_key(struct s390_paes_ctx *ctx)
index 4f21ae561e4ddc7af7bc7a21f848f0fe4315ab59..390906b8e386e609f760d891484133b1c0fbea79 100644 (file)
@@ -9,6 +9,7 @@
 #define CFI_DEF_CFA_OFFSET     .cfi_def_cfa_offset
 #define CFI_ADJUST_CFA_OFFSET  .cfi_adjust_cfa_offset
 #define CFI_RESTORE            .cfi_restore
+#define CFI_REL_OFFSET         .cfi_rel_offset
 
 #ifdef CONFIG_AS_CFI_VAL_OFFSET
 #define CFI_VAL_OFFSET         .cfi_val_offset
index 57f62596e53b958a2737f23f1985e5154179fd9e..85247ef5a41b89a390d3290cefb785c6c5a81ad6 100644 (file)
@@ -24,8 +24,10 @@ __kernel_\func:
        CFI_DEF_CFA_OFFSET (STACK_FRAME_OVERHEAD + WRAPPER_FRAME_SIZE)
        CFI_VAL_OFFSET 15, -STACK_FRAME_OVERHEAD
        stg     %r14,STACK_FRAME_OVERHEAD(%r15)
+       CFI_REL_OFFSET 14, STACK_FRAME_OVERHEAD
        brasl   %r14,__s390_vdso_\func
        lg      %r14,STACK_FRAME_OVERHEAD(%r15)
+       CFI_RESTORE 14
        aghi    %r15,WRAPPER_FRAME_SIZE
        CFI_DEF_CFA_OFFSET STACK_FRAME_OVERHEAD
        CFI_RESTORE 15
index 094b43b121cd5d8d8895af0f1c830ab5cfb1d355..12d22a7fa32fd27a97d111f7d36971ecc7461bf5 100644 (file)
@@ -2661,7 +2661,7 @@ static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
                return 0;
 
        start = pmd_val(*pmd) & HPAGE_MASK;
-       end = start + HPAGE_SIZE - 1;
+       end = start + HPAGE_SIZE;
        __storage_key_init_range(start, end);
        set_bit(PG_arch_1, &page->flags);
        cond_resched();
index c2e8242bd15dd0afb6454e9e71e9ca5ef969ba13..dc3db86e13ffb8c14ca607c6df86ede9d3cf6262 100644 (file)
@@ -139,7 +139,7 @@ static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
        }
 
        if (!test_and_set_bit(PG_arch_1, &page->flags))
-               __storage_key_init_range(paddr, paddr + size - 1);
+               __storage_key_init_range(paddr, paddr + size);
 }
 
 void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
index df5fac428408fe65ecc03766def03e0959bc539a..59cbc94b6e6903d915d9b16b04e82f269c88754b 100644 (file)
@@ -1807,36 +1807,41 @@ populate_extable:
                        if (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
                            BPF_MODE(insn->code) == BPF_PROBE_MEMSX) {
                                /* Conservatively check that src_reg + insn->off is a kernel address:
-                                *   src_reg + insn->off >= TASK_SIZE_MAX + PAGE_SIZE
-                                * src_reg is used as scratch for src_reg += insn->off and restored
-                                * after emit_ldx if necessary
+                                *   src_reg + insn->off > TASK_SIZE_MAX + PAGE_SIZE
+                                *   and
+                                *   src_reg + insn->off < VSYSCALL_ADDR
                                 */
 
-                               u64 limit = TASK_SIZE_MAX + PAGE_SIZE;
+                               u64 limit = TASK_SIZE_MAX + PAGE_SIZE - VSYSCALL_ADDR;
                                u8 *end_of_jmp;
 
-                               /* At end of these emitted checks, insn->off will have been added
-                                * to src_reg, so no need to do relative load with insn->off offset
-                                */
-                               insn_off = 0;
+                               /* movabsq r10, VSYSCALL_ADDR */
+                               emit_mov_imm64(&prog, BPF_REG_AX, (long)VSYSCALL_ADDR >> 32,
+                                              (u32)(long)VSYSCALL_ADDR);
 
-                               /* movabsq r11, limit */
-                               EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
-                               EMIT((u32)limit, 4);
-                               EMIT(limit >> 32, 4);
+                               /* mov src_reg, r11 */
+                               EMIT_mov(AUX_REG, src_reg);
 
                                if (insn->off) {
-                                       /* add src_reg, insn->off */
-                                       maybe_emit_1mod(&prog, src_reg, true);
-                                       EMIT2_off32(0x81, add_1reg(0xC0, src_reg), insn->off);
+                                       /* add r11, insn->off */
+                                       maybe_emit_1mod(&prog, AUX_REG, true);
+                                       EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
                                }
 
-                               /* cmp src_reg, r11 */
-                               maybe_emit_mod(&prog, src_reg, AUX_REG, true);
-                               EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
+                               /* sub r11, r10 */
+                               maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+                               EMIT2(0x29, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
+
+                               /* movabsq r10, limit */
+                               emit_mov_imm64(&prog, BPF_REG_AX, (long)limit >> 32,
+                                              (u32)(long)limit);
+
+                               /* cmp r10, r11 */
+                               maybe_emit_mod(&prog, AUX_REG, BPF_REG_AX, true);
+                               EMIT2(0x39, add_2reg(0xC0, AUX_REG, BPF_REG_AX));
 
-                               /* if unsigned '>=', goto load */
-                               EMIT2(X86_JAE, 0);
+                               /* if unsigned '>', goto load */
+                               EMIT2(X86_JA, 0);
                                end_of_jmp = prog;
 
                                /* xor dst_reg, dst_reg */
@@ -1862,18 +1867,6 @@ populate_extable:
                                /* populate jmp_offset for JMP above */
                                start_of_ldx[-1] = prog - start_of_ldx;
 
-                               if (insn->off && src_reg != dst_reg) {
-                                       /* sub src_reg, insn->off
-                                        * Restore src_reg after "add src_reg, insn->off" in prev
-                                        * if statement. But if src_reg == dst_reg, emit_ldx
-                                        * above already clobbered src_reg, so no need to restore.
-                                        * If add src_reg, insn->off was unnecessary, no need to
-                                        * restore either.
-                                        */
-                                       maybe_emit_1mod(&prog, src_reg, true);
-                                       EMIT2_off32(0x81, add_1reg(0xE8, src_reg), insn->off);
-                               }
-
                                if (!bpf_prog->aux->extable)
                                        break;
 
@@ -3473,3 +3466,9 @@ bool bpf_jit_supports_ptr_xchg(void)
 {
        return true;
 }
+
+/* x86-64 JIT emits its own code to filter user addresses so return 0 here */
+u64 bpf_arch_uaddress_limit(void)
+{
+       return 0;
+}
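The new guard above accepts a probed address only when it lies strictly above TASK_SIZE_MAX + PAGE_SIZE and strictly below VSYSCALL_ADDR, and it does so with a single unsigned comparison after subtracting VSYSCALL_ADDR from both the address and the lower bound. A stand-alone illustration of that trick with stand-in constants (the real bounds come from the kernel's definitions):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  /* Stand-in values; the JIT uses the kernel's TASK_SIZE_MAX, PAGE_SIZE
   * and VSYSCALL_ADDR. */
  #define DEMO_TASK_END   0x00007ffffffff000ULL
  #define DEMO_VSYSCALL   0xffffffffff600000ULL

  /* True only when DEMO_TASK_END < addr < DEMO_VSYSCALL: subtracting
   * DEMO_VSYSCALL from both sides lets one unsigned '>' cover both
   * ends of the range, as in the emitted code. */
  static bool probe_addr_ok(uint64_t addr)
  {
          return addr - DEMO_VSYSCALL > DEMO_TASK_END - DEMO_VSYSCALL;
  }

  int main(void)
  {
          printf("%d\n", probe_addr_ok(0xffff888000000000ULL)); /* 1: kernel address */
          printf("%d\n", probe_addr_ok(0x00007f0000000000ULL)); /* 0: user address   */
          printf("%d\n", probe_addr_ok(DEMO_VSYSCALL));          /* 0: vsyscall page  */
          return 0;
  }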
index 38bcecb0e457d9741c142cada4d38ec65ff0f88b..a2b6bb5429f5cb4c3c9694c9db6dbb8f6992d9fa 100644 (file)
@@ -100,6 +100,10 @@ void flush_cache_range(struct vm_area_struct*, ulong, ulong);
 void flush_icache_range(unsigned long start, unsigned long end);
 void flush_cache_page(struct vm_area_struct*,
                             unsigned long, unsigned long);
+#define flush_cache_all flush_cache_all
+#define flush_cache_range flush_cache_range
+#define flush_icache_range flush_icache_range
+#define flush_cache_page flush_cache_page
 #else
 #define flush_cache_all local_flush_cache_all
 #define flush_cache_range local_flush_cache_range
@@ -136,20 +140,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 
 #else
 
-#define flush_cache_all()                              do { } while (0)
-#define flush_cache_mm(mm)                             do { } while (0)
-#define flush_cache_dup_mm(mm)                         do { } while (0)
-
-#define flush_cache_vmap(start,end)                    do { } while (0)
-#define flush_cache_vmap_early(start,end)              do { } while (0)
-#define flush_cache_vunmap(start,end)                  do { } while (0)
-
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                                do { } while (0)
-
 #define flush_icache_range local_flush_icache_range
-#define flush_cache_page(vma, addr, pfn)               do { } while (0)
-#define flush_cache_range(vma, start, end)             do { } while (0)
 
 #endif
 
@@ -162,15 +153,14 @@ void local_flush_cache_page(struct vm_area_struct *vma,
                __invalidate_icache_range(start,(end) - (start));       \
        } while (0)
 
-#define flush_dcache_mmap_lock(mapping)                        do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)              do { } while (0)
-
 #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 extern void copy_to_user_page(struct vm_area_struct*, struct page*,
                unsigned long, void*, const void*, unsigned long);
 extern void copy_from_user_page(struct vm_area_struct*, struct page*,
                unsigned long, void*, const void*, unsigned long);
+#define copy_to_user_page copy_to_user_page
+#define copy_from_user_page copy_from_user_page
 
 #else
 
@@ -186,4 +176,6 @@ extern void copy_from_user_page(struct vm_area_struct*, struct page*,
 
 #endif
 
+#include <asm-generic/cacheflush.h>
+
 #endif /* _XTENSA_CACHEFLUSH_H */
index d008a153a2b9f7a9782b5874643f2d5a3741e995..7ed1a2085bd72883025c7de7d0d028ab077cc56c 100644 (file)
 #define MAKE_RA_FOR_CALL(ra,ws)   (((ra) & 0x3fffffff) | (ws) << 30)
 
 /* Convert return address to a valid pc
- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ * Note: 'text' is the address within the same 1GB range as the ra
  */
-#define MAKE_PC_FROM_RA(ra,sp)    (((ra) & 0x3fffffff) | ((sp) & 0xc0000000))
+#define MAKE_PC_FROM_RA(ra, text) (((ra) & 0x3fffffff) | ((unsigned long)(text) & 0xc0000000))
 
 #elif defined(__XTENSA_CALL0_ABI__)
 
 #define MAKE_RA_FOR_CALL(ra, ws)   (ra)
 
 /* Convert return address to a valid pc
- * Note: We assume that the stack pointer is in the same 1GB ranges as the ra
+ * Note: 'text' is not used as 'ra' is always the full address
  */
-#define MAKE_PC_FROM_RA(ra, sp)    (ra)
+#define MAKE_PC_FROM_RA(ra, text)  (ra)
 
 #else
 #error Unsupported Xtensa ABI
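As the updated comments spell out, the windowed-ABI macro now takes the 1GB-segment bits from a kernel text address instead of the stack pointer. A small stand-alone illustration with made-up addresses:

  #include <stdio.h>

  /* Windowed-ABI form from the patch: the low 30 bits come from the return
   * address, the top two (1GB segment) bits from a text address. */
  #define MAKE_PC_FROM_RA(ra, text) \
          (((ra) & 0x3fffffff) | ((unsigned long)(text) & 0xc0000000))

  int main(void)
  {
          unsigned long ra   = 0x9000423cUL;  /* top bits hold the window size  */
          unsigned long text = 0xd0003000UL;  /* made-up kernel text address    */

          /* prints pc = 0xd000423c */
          printf("pc = 0x%08lx\n", MAKE_PC_FROM_RA(ra, text));
          return 0;
  }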
index a270467556dc84df2c6ceb0786acee3669a059f8..86c70117371bb7cc022b0793f0a4eb8ef834bc63 100644 (file)
@@ -87,7 +87,7 @@ struct pt_regs {
 # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
 # define instruction_pointer(regs) ((regs)->pc)
 # define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
-                                              (regs)->areg[1]))
+                                              (regs)->pc))
 
 # ifndef CONFIG_SMP
 #  define profile_pc(regs) instruction_pointer(regs)
index a815577d25fd02f3b267359d923cb83aae512c3d..7bd66677f7b6de58d0a1bda1b414ebc622dce962 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/regs.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/sections.h>
 #include <asm/traps.h>
 
 extern void ret_from_fork(void);
@@ -380,7 +381,7 @@ unsigned long __get_wchan(struct task_struct *p)
        int count = 0;
 
        sp = p->thread.sp;
-       pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);
+       pc = MAKE_PC_FROM_RA(p->thread.ra, _text);
 
        do {
                if (sp < stack_page + sizeof(struct task_struct) ||
@@ -392,7 +393,7 @@ unsigned long __get_wchan(struct task_struct *p)
 
                /* Stack layout: sp-4: ra, sp-3: sp' */
 
-               pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+               pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), _text);
                sp = SPILL_SLOT(sp, 1);
        } while (count++ < 16);
        return 0;
index 831ffb648bda7e160408fd97530eddee760c27fc..ed324fdf2a2f9124527a5514ce14ac731996a2ad 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/stacktrace.h>
 
 #include <asm/ftrace.h>
+#include <asm/sections.h>
 #include <asm/stacktrace.h>
 #include <asm/traps.h>
 #include <linux/uaccess.h>
@@ -189,7 +190,7 @@ void walk_stackframe(unsigned long *sp,
                if (a1 <= (unsigned long)sp)
                        break;
 
-               frame.pc = MAKE_PC_FROM_RA(a0, a1);
+               frame.pc = MAKE_PC_FROM_RA(a0, _text);
                frame.sp = a1;
 
                if (fn(&frame, data))
index 8896e691c051eab87ba88ffcbac01bd4860df0de..abec44b687dff0d4da3a77307603a0866a35ce88 100644 (file)
@@ -166,10 +166,8 @@ late_initcall(rs_init);
 
 static void iss_console_write(struct console *co, const char *s, unsigned count)
 {
-       if (s && *s != 0) {
-               int len = strlen(s);
-               simc_write(1, s, count < len ? count : len);
-       }
+       if (s && *s != 0)
+               simc_write(1, s, min(count, strlen(s)));
 }
 
 static struct tty_driver* iss_console_device(struct console *c, int *index)
index 5cb425f6f02d4bd1e4aa4f0e58d846a52896fdca..0a34dd3c4f38d298b8766065026cc465801f7aa7 100644 (file)
@@ -2838,6 +2838,43 @@ int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
 }
 EXPORT_SYMBOL_GPL(regmap_read);
 
+/**
+ * regmap_read_bypassed() - Read a value from a single register direct
+ *                         from the device, bypassing the cache
+ *
+ * @map: Register map to read from
+ * @reg: Register to be read from
+ * @val: Pointer to store read value
+ *
+ * A value of zero will be returned on success, a negative errno will
+ * be returned in error cases.
+ */
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
+{
+       int ret;
+       bool bypass, cache_only;
+
+       if (!IS_ALIGNED(reg, map->reg_stride))
+               return -EINVAL;
+
+       map->lock(map->lock_arg);
+
+       bypass = map->cache_bypass;
+       cache_only = map->cache_only;
+       map->cache_bypass = true;
+       map->cache_only = false;
+
+       ret = _regmap_read(map, reg, val);
+
+       map->cache_bypass = bypass;
+       map->cache_only = cache_only;
+
+       map->unlock(map->lock_arg);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_read_bypassed);
+
 /**
  * regmap_raw_read() - Read raw data from the device
  *
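The kerneldoc above defines the new regmap_read_bypassed() helper. A minimal sketch of a caller (hypothetical driver code; the register offset and ready bit are invented for illustration):

  #include <linux/bits.h>
  #include <linux/delay.h>
  #include <linux/errno.h>
  #include <linux/regmap.h>

  #define DEMO_REG_STATUS  0x00        /* made-up status register */
  #define DEMO_STATUS_RDY  BIT(0)      /* made-up "ready" bit     */

  /* Poll the hardware directly, e.g. early in resume while the regmap
   * may still be in cache-only mode, so a cached value is not enough. */
  static int demo_wait_ready(struct regmap *map)
  {
          unsigned int status;
          int ret, i;

          for (i = 0; i < 10; i++) {
                  ret = regmap_read_bypassed(map, DEMO_REG_STATUS, &status);
                  if (ret)
                          return ret;
                  if (status & DEMO_STATUS_RDY)
                          return 0;
                  usleep_range(1000, 2000);
          }
          return -ETIMEDOUT;
  }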
index bea3d5cf8a83487909270d5f2398267250507a31..374e4efa8759fba62df2cdbbc49c9428ddb5ea5b 100644 (file)
@@ -2177,7 +2177,8 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub, struct io_uring_cmd *cmd)
                .max_hw_sectors         = p->max_sectors,
                .chunk_sectors          = p->chunk_sectors,
                .virt_boundary_mask     = p->virt_boundary_mask,
-
+               .max_segments           = USHRT_MAX,
+               .max_segment_size       = UINT_MAX,
        };
        struct gendisk *disk;
        int ret = -EINVAL;
index b0d671db178a85b6ab98de29f05b994db99a8697..ea31ac7ac1ca931a569af239dbaa2052448e8df5 100644 (file)
@@ -148,10 +148,12 @@ packet_buffer_get(struct client *client, char __user *data, size_t user_length)
        if (atomic_read(&buffer->size) == 0)
                return -ENODEV;
 
-       /* FIXME: Check length <= user_length. */
+       length = buffer->head->length;
+
+       if (length > user_length)
+               return 0;
 
        end = buffer->data + buffer->capacity;
-       length = buffer->head->length;
 
        if (&buffer->head->data[length] < end) {
                if (copy_to_user(data, buffer->head->data, length))
index 38d19410a2be68cab9f382d48ab7f15493c42af0..b9ae0340b8a70343185fe97364229dab4f72101c 100644 (file)
@@ -1556,6 +1556,8 @@ static int handle_at_packet(struct context *context,
 #define HEADER_GET_DATA_LENGTH(q)      (((q) >> 16) & 0xffff)
 #define HEADER_GET_EXTENDED_TCODE(q)   (((q) >> 0) & 0xffff)
 
+static u32 get_cycle_time(struct fw_ohci *ohci);
+
 static void handle_local_rom(struct fw_ohci *ohci,
                             struct fw_packet *packet, u32 csr)
 {
@@ -1580,6 +1582,8 @@ static void handle_local_rom(struct fw_ohci *ohci,
                                 (void *) ohci->config_rom + i, length);
        }
 
+       // Timestamping on behalf of the hardware.
+       response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
        fw_core_handle_response(&ohci->card, &response);
 }
 
@@ -1628,6 +1632,8 @@ static void handle_local_lock(struct fw_ohci *ohci,
        fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);
 
  out:
+       // Timestamping on behalf of the hardware.
+       response.timestamp = cycle_time_to_ohci_tstamp(get_cycle_time(ohci));
        fw_core_handle_response(&ohci->card, &response);
 }
 
@@ -1670,8 +1676,6 @@ static void handle_local_request(struct context *ctx, struct fw_packet *packet)
        }
 }
 
-static u32 get_cycle_time(struct fw_ohci *ohci);
-
 static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
 {
        unsigned long flags;
index 5b439d04079c841e1bd698f63d96d1b428f6b2ed..50f6503fe49f5e44474b99a1cc53edf0fa97d283 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/memblock.h>
 #include <linux/spinlock.h>
 #include <linux/crash_dump.h>
+#include <linux/nmi.h>
 #include <asm/unaccepted_memory.h>
 
 /* Protects unaccepted memory bitmap and accepting_list */
@@ -149,6 +150,9 @@ retry:
        }
 
        list_del(&range.list);
+
+       touch_softlockup_watchdog();
+
        spin_unlock_irqrestore(&unaccepted_memory_lock, flags);
 }
 
index 2131de36e3dac00fee06ccbcc1aebe80d69f0fbc..e4d4e55c08ad5a3a11b7133e3f717110c958b09f 100644 (file)
@@ -220,7 +220,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
            (kfd_mem_limit.ttm_mem_used + ttm_mem_needed >
             kfd_mem_limit.max_ttm_mem_limit) ||
            (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed >
-            vram_size - reserved_for_pt)) {
+            vram_size - reserved_for_pt - atomic64_read(&adev->vram_pin_size))) {
                ret = -ENOMEM;
                goto release;
        }
index ce733e3cb35d05e445830dc22b8216bd0f6dd014..f6d503432a9ef966b2748acfd801ff04730cad6e 100644 (file)
@@ -1243,14 +1243,18 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
  * amdgpu_bo_move_notify - notification about a memory move
  * @bo: pointer to a buffer object
  * @evict: if this move is evicting the buffer from the graphics address space
+ * @new_mem: new resource for backing the BO
  *
  * Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
  * bookkeeping.
  * TTM driver callback which is called when ttm moves a buffer.
  */
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+                          bool evict,
+                          struct ttm_resource *new_mem)
 {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+       struct ttm_resource *old_mem = bo->resource;
        struct amdgpu_bo *abo;
 
        if (!amdgpu_bo_is_amdgpu_bo(bo))
@@ -1262,12 +1266,12 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
        amdgpu_bo_kunmap(abo);
 
        if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-           bo->resource->mem_type != TTM_PL_SYSTEM)
+           old_mem && old_mem->mem_type != TTM_PL_SYSTEM)
                dma_buf_move_notify(abo->tbo.base.dma_buf);
 
-       /* remember the eviction */
-       if (evict)
-               atomic64_inc(&adev->num_evictions);
+       /* move_notify is called before move happens */
+       trace_amdgpu_bo_move(abo, new_mem ? new_mem->mem_type : -1,
+                            old_mem ? old_mem->mem_type : -1);
 }
 
 void amdgpu_bo_get_memory(struct amdgpu_bo *bo,
index fa03d9e4874cc65b39e038014ab15fc4e58ba858..bc42ccbde659ac5ef1854b3a90d5561916faf422 100644 (file)
@@ -328,7 +328,9 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
 int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
                           size_t buffer_size, uint32_t *metadata_size,
                           uint64_t *flags);
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
+                          bool evict,
+                          struct ttm_resource *new_mem);
 void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
 vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
index 1d71729e3f6bcef2c02f9e1ce252dc6cd6461b94..109fe557a02bc85d562310bdc5fff082ecf8f2ba 100644 (file)
@@ -419,7 +419,7 @@ bool amdgpu_res_cpu_visible(struct amdgpu_device *adev,
                return false;
 
        if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT ||
-           res->mem_type == AMDGPU_PL_PREEMPT)
+           res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL)
                return true;
 
        if (res->mem_type != TTM_PL_VRAM)
@@ -481,14 +481,16 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 
        if (!old_mem || (old_mem->mem_type == TTM_PL_SYSTEM &&
                         bo->ttm == NULL)) {
+               amdgpu_bo_move_notify(bo, evict, new_mem);
                ttm_bo_move_null(bo, new_mem);
-               goto out;
+               return 0;
        }
        if (old_mem->mem_type == TTM_PL_SYSTEM &&
            (new_mem->mem_type == TTM_PL_TT ||
             new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
+               amdgpu_bo_move_notify(bo, evict, new_mem);
                ttm_bo_move_null(bo, new_mem);
-               goto out;
+               return 0;
        }
        if ((old_mem->mem_type == TTM_PL_TT ||
             old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
@@ -498,9 +500,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                        return r;
 
                amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
+               amdgpu_bo_move_notify(bo, evict, new_mem);
                ttm_resource_free(bo, &bo->resource);
                ttm_bo_assign_mem(bo, new_mem);
-               goto out;
+               return 0;
        }
 
        if (old_mem->mem_type == AMDGPU_PL_GDS ||
@@ -512,8 +515,9 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
            new_mem->mem_type == AMDGPU_PL_OA ||
            new_mem->mem_type == AMDGPU_PL_DOORBELL) {
                /* Nothing to save here */
+               amdgpu_bo_move_notify(bo, evict, new_mem);
                ttm_bo_move_null(bo, new_mem);
-               goto out;
+               return 0;
        }
 
        if (bo->type == ttm_bo_type_device &&
@@ -525,22 +529,23 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
        }
 
-       if (adev->mman.buffer_funcs_enabled) {
-               if (((old_mem->mem_type == TTM_PL_SYSTEM &&
-                     new_mem->mem_type == TTM_PL_VRAM) ||
-                    (old_mem->mem_type == TTM_PL_VRAM &&
-                     new_mem->mem_type == TTM_PL_SYSTEM))) {
-                       hop->fpfn = 0;
-                       hop->lpfn = 0;
-                       hop->mem_type = TTM_PL_TT;
-                       hop->flags = TTM_PL_FLAG_TEMPORARY;
-                       return -EMULTIHOP;
-               }
+       if (adev->mman.buffer_funcs_enabled &&
+           ((old_mem->mem_type == TTM_PL_SYSTEM &&
+             new_mem->mem_type == TTM_PL_VRAM) ||
+            (old_mem->mem_type == TTM_PL_VRAM &&
+             new_mem->mem_type == TTM_PL_SYSTEM))) {
+               hop->fpfn = 0;
+               hop->lpfn = 0;
+               hop->mem_type = TTM_PL_TT;
+               hop->flags = TTM_PL_FLAG_TEMPORARY;
+               return -EMULTIHOP;
+       }
 
+       amdgpu_bo_move_notify(bo, evict, new_mem);
+       if (adev->mman.buffer_funcs_enabled)
                r = amdgpu_move_blit(bo, evict, new_mem, old_mem);
-       } else {
+       else
                r = -ENODEV;
-       }
 
        if (r) {
                /* Check that all memory is CPU accessible */
@@ -555,11 +560,10 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
                        return r;
        }
 
-       trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
-out:
-       /* update statistics */
+       /* update statistics after the move */
+       if (evict)
+               atomic64_inc(&adev->num_evictions);
        atomic64_add(bo->base.size, &adev->num_bytes_moved);
-       amdgpu_bo_move_notify(bo, evict);
        return 0;
 }
 
@@ -1559,7 +1563,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 static void
 amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
 {
-       amdgpu_bo_move_notify(bo, false);
+       amdgpu_bo_move_notify(bo, false, NULL);
 }
 
 static struct ttm_device_funcs amdgpu_bo_driver = {
index 58c1fe5421934d547bc552d5e72526468951bf69..451bb058cc62039eaf98c0e90487c0884042dfa4 100644 (file)
@@ -829,6 +829,14 @@ struct kfd_process *kfd_create_process(struct task_struct *thread)
        if (process) {
                pr_debug("Process already found\n");
        } else {
+               /* If the process just called exec(3), it is possible that the
+                * cleanup of the kfd_process (following the release of the mm
+                * of the old process image) is still in the cleanup work queue.
+                * Make sure to drain any pending work before trying to
+                * recreate any resources for this process.
+                */
+               flush_workqueue(kfd_process_wq);
+
                process = create_process(thread);
                if (IS_ERR(process))
                        goto out;
index f3f94d109726d380326c96e576afd5263e8a1daf..d6e71aa808d881f544053aa2b9c54e5d367cda79 100644 (file)
@@ -4537,15 +4537,18 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
        /* Determine whether to enable Replay support by default. */
        if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) {
                switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) {
-               case IP_VERSION(3, 1, 4):
-               case IP_VERSION(3, 1, 5):
-               case IP_VERSION(3, 1, 6):
-               case IP_VERSION(3, 2, 0):
-               case IP_VERSION(3, 2, 1):
-               case IP_VERSION(3, 5, 0):
-               case IP_VERSION(3, 5, 1):
-                       replay_feature_enabled = true;
-                       break;
+/*
+ * Disabled by default due to https://gitlab.freedesktop.org/drm/amd/-/issues/3344
+ *             case IP_VERSION(3, 1, 4):
+ *             case IP_VERSION(3, 1, 5):
+ *             case IP_VERSION(3, 1, 6):
+ *             case IP_VERSION(3, 2, 0):
+ *             case IP_VERSION(3, 2, 1):
+ *             case IP_VERSION(3, 5, 0):
+ *             case IP_VERSION(3, 5, 1):
+ *                     replay_feature_enabled = true;
+ *                     break;
+ */
                default:
                        replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK;
                        break;
index eee4945653e2d18d09f8bfc8175251499950f53c..c7715a17f388b34f3c2b9b0c3f2a26916f8bb509 100644 (file)
@@ -1495,7 +1495,9 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -1596,7 +1598,9 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -1681,7 +1685,9 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -1780,7 +1786,9 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -1865,7 +1873,9 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -1964,7 +1974,9 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -2045,7 +2057,9 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -2141,7 +2155,9 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -2220,7 +2236,9 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -2276,7 +2294,9 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -2347,7 +2367,9 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
@@ -2418,7 +2440,9 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf,
        for (i = 0; i < MAX_PIPES; i++) {
                pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe_ctx->stream &&
-                   pipe_ctx->stream->link == aconnector->dc_link)
+                   pipe_ctx->stream->link == aconnector->dc_link &&
+                   pipe_ctx->stream->sink &&
+                   pipe_ctx->stream->sink == aconnector->dc_sink)
                        break;
        }
 
index 05f392501c0ae3572250061b31defef7cde51fb5..ab31643b109698ec95a11da1aabdfa258705989a 100644 (file)
@@ -2948,6 +2948,7 @@ static enum bp_result construct_integrated_info(
                                result = get_integrated_info_v2_1(bp, info);
                                break;
                        case 2:
+                       case 3:
                                result = get_integrated_info_v2_2(bp, info);
                                break;
                        default:
index 644da463732093f9d3798b3de9565b5f7fd9ea0b..5506cf9b3672f80992c846b8686613427976bb81 100644 (file)
@@ -145,6 +145,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
         */
        clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
        if (safe_to_lower) {
+               if (clk_mgr_base->clks.dtbclk_en && !new_clocks->dtbclk_en) {
+                       dcn315_smu_set_dtbclk(clk_mgr, false);
+                       clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+               }
                /* check that we're not already in lower */
                if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
                        display_count = dcn315_get_active_display_cnt_wa(dc, context);
@@ -160,6 +164,10 @@ static void dcn315_update_clocks(struct clk_mgr *clk_mgr_base,
                        }
                }
        } else {
+               if (!clk_mgr_base->clks.dtbclk_en && new_clocks->dtbclk_en) {
+                       dcn315_smu_set_dtbclk(clk_mgr, true);
+                       clk_mgr_base->clks.dtbclk_en = new_clocks->dtbclk_en;
+               }
                /* check that we're not already in D0 */
                if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) {
                        union display_idle_optimization_u idle_info = { 0 };
index bec252e1dd27a98263b5bd3299cc3dfd8ed089e9..e506e4f969ca9ffc90bdf8714c936a72aa9c2b17 100644 (file)
@@ -712,8 +712,12 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
                                         * since we calculate mode support based on softmax being the max UCLK
                                         * frequency.
                                         */
-                                       dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
-                                                       dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
+                                       if (dc->debug.disable_dc_mode_overwrite) {
+                                               dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+                                               dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
+                                       } else
+                                               dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK,
+                                                               dc->clk_mgr->bw_params->dc_mode_softmax_memclk);
                                } else {
                                        dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, dc->clk_mgr->bw_params->max_memclk_mhz);
                                }
@@ -746,8 +750,13 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
                /* set UCLK to requested value if P-State switching is supported, or to re-enable P-State switching */
                if (clk_mgr_base->clks.p_state_change_support &&
                                (update_uclk || !clk_mgr_base->clks.prev_p_state_change_support) &&
-                               !dc->work_arounds.clock_update_disable_mask.uclk)
+                               !dc->work_arounds.clock_update_disable_mask.uclk) {
+                       if (dc->clk_mgr->dc_mode_softmax_enabled && dc->debug.disable_dc_mode_overwrite)
+                               dcn30_smu_set_hard_max_by_freq(clk_mgr, PPCLK_UCLK,
+                                               max((int)dc->clk_mgr->bw_params->dc_mode_softmax_memclk, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz)));
+
                        dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_UCLK, khz_to_mhz_ceil(clk_mgr_base->clks.dramclk_khz));
+               }
 
                if (clk_mgr_base->clks.num_ways != new_clocks->num_ways &&
                                clk_mgr_base->clks.num_ways > new_clocks->num_ways) {
index 03b554e912a20d4ae58e5161d55943eb0bb7a0d9..d68c83e40d4d6c3bd89e66cc5ecc63ca14c010d0 100644 (file)
@@ -1801,6 +1801,9 @@ bool dc_validate_boot_timing(const struct dc *dc,
                return false;
        }
 
+       if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)
+               return false;
+
        if (dc->link_srv->edp_is_ilr_optimization_required(link, crtc_timing)) {
                DC_LOG_EVENT_LINK_TRAINING("Seamless boot disabled to optimize eDP link rate\n");
                return false;
index 5b7ad38f85e08f422c32e48bdb4b384b8bb75e08..65e45a0b4ff34351013a6e7f5debe0eb8c676e8d 100644 (file)
@@ -395,6 +395,12 @@ void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
                                x),
                        25));
 
+       // If y rounds up to an integer, carry it over to x.
+       if (y >> 25) {
+               x += 1;
+               y = 0;
+       }
+
        switch (stream_encoder_inst) {
        case 0:
                REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
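For reference, the carry above can be illustrated in isolation with a minimal sketch (the helper name and the use of the math64 helpers are assumptions for illustration; the driver computes x and y with its own fixed-point routines before programming the VC_RATE registers):

#include <linux/math64.h>
#include <linux/types.h>

/*
 * Sketch: split a time-slot count given as num/den into an integer part (x)
 * and a fractional part (y) in units of 1/2^25, rounding y up.  If the
 * rounded fraction reaches a full unit (1 << 25), fold it back into x so the
 * (x, y) pair stays normalized.
 */
static void split_avg_time_slots(u64 num, u64 den, u32 *x, u32 *y)
{
	*x = div64_u64(num, den);
	*y = DIV64_U64_ROUND_UP((num % den) << 25, den);

	/* If y rounds up to an integer, carry it over to x. */
	if (*y >> 25) {
		*x += 1;
		*y = 0;
	}
}

For example, with den = 1 << 26 and num = (5 << 26) - 1, y rounds up to 1 << 25 and the result becomes x = 5, y = 0 instead of an out-of-range fraction.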
index deb6d162a2d5c00df00b069a57e885b34a18e939..7307b7b8d8ad7595bcfbf09e0a39786df389398f 100644 (file)
@@ -291,6 +291,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_15_soc = {
        .do_urgent_latency_adjustment = false,
        .urgent_latency_adjustment_fabric_clock_component_us = 0,
        .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+       .dispclk_dppclk_vco_speed_mhz = 2400.0,
        .num_chans = 4,
        .dummy_pstate_latency_us = 10.0
 };
@@ -438,6 +439,7 @@ static struct _vcs_dpi_soc_bounding_box_st dcn3_16_soc = {
        .do_urgent_latency_adjustment = false,
        .urgent_latency_adjustment_fabric_clock_component_us = 0,
        .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+       .dispclk_dppclk_vco_speed_mhz = 2500.0,
 };
 
 void dcn31_zero_pipe_dcc_fraction(display_e2e_pipe_params_st *pipes,
index 5491b707cec881b9854ab96834503c1e88053380..5a965c26bf2095fa44c5f81e89c0feded2b99b38 100644 (file)
@@ -270,7 +270,7 @@ static void set_usb4_req_bw_req(struct dc_link *link, int req_bw)
 
        /* Error check whether requested and allocated are equal */
        req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity);
-       if (req_bw == link->dpia_bw_alloc_config.allocated_bw) {
+       if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) {
                DC_LOG_ERROR("%s: Request bw equals to allocated bw for link(%d)\n",
                        __func__, link->link_index);
        }
@@ -341,6 +341,14 @@ bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link)
                        ret = true;
                        init_usb4_bw_struct(link);
                        link->dpia_bw_alloc_config.bw_alloc_enabled = true;
+
+                       /*
+                        * During DP tunnel creation, CM preallocates BW and reduces the estimated BW of
+                        * other DPIAs. CM releases the preallocation only when the allocation is complete.
+                        * Do a zero allocation to make the CM release the preallocation and update the
+                        * estimated BW correctly for all DPIAs per host router.
+                        */
+                       link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0);
                }
        }
 
index a2387cea1af9a184124121b3d631740e01c41484..622214b365a25aa50fc21a6dde5b7e6f192a55b7 100644 (file)
@@ -2449,6 +2449,7 @@ static bool dcn20_resource_construct(
        dc->caps.post_blend_color_processing = true;
        dc->caps.force_dp_tps4_for_cp2520 = true;
        dc->caps.extended_aux_timeout_support = true;
+       dc->caps.dmcub_support = true;
 
        /* Color pipeline capabilities */
        dc->caps.color.dpp.dcn_arch = 1;
index 408dbe63a90cfd5c5a5b51aa80fdd8bf7aaadde5..a0c5c41c8aa24a454c7c6ec10529ef8062ee3e9d 100644 (file)
@@ -7,13 +7,14 @@
 #include "pvr_rogue_mips.h"
 
 #include <asm/page.h>
+#include <linux/math.h>
 #include <linux/types.h>
 
 /* Forward declaration from pvr_gem.h. */
 struct pvr_gem_object;
 
-#define PVR_MIPS_PT_PAGE_COUNT ((ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K) \
-                               >> PAGE_SHIFT)
+#define PVR_MIPS_PT_PAGE_COUNT DIV_ROUND_UP(ROGUE_MIPSFW_MAX_NUM_PAGETABLE_PAGES * ROGUE_MIPSFW_PAGE_SIZE_4K, PAGE_SIZE)
+
 /**
  * struct pvr_fw_mips_data - MIPS-specific data
  */
index 6f5d376d8fcc1ecb6d9faa80b4b06ba4cd1b21e4..a11d16a16c3b25d288c07bba8ce4f545b3bda32d 100644 (file)
@@ -15,7 +15,9 @@ struct nvkm_gsp_mem {
 };
 
 struct nvkm_gsp_radix3 {
-       struct nvkm_gsp_mem mem[3];
+       struct nvkm_gsp_mem lvl0;
+       struct nvkm_gsp_mem lvl1;
+       struct sg_table lvl2;
 };
 
 int nvkm_gsp_sg(struct nvkm_device *, u64 size, struct sg_table *);
index adc60b25f8e6c61e51c7db37805fa24a89378fae..141b0a513bf52814e1977e916d81593b215de0d9 100644 (file)
@@ -205,7 +205,9 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
                break;
        case NVKM_FIRMWARE_IMG_DMA:
                nvkm_memory_unref(&memory);
-               dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
+               dma_unmap_single(fw->device->dev, fw->phys, sg_dma_len(&fw->mem.sgl),
+                                DMA_TO_DEVICE);
+               kfree(fw->img);
                break;
        case NVKM_FIRMWARE_IMG_SGT:
                nvkm_memory_unref(&memory);
@@ -235,14 +237,17 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
                fw->img = kmemdup(src, fw->len, GFP_KERNEL);
                break;
        case NVKM_FIRMWARE_IMG_DMA: {
-               dma_addr_t addr;
-
                len = ALIGN(fw->len, PAGE_SIZE);
 
-               fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL);
-               if (fw->img) {
-                       memcpy(fw->img, src, fw->len);
-                       fw->phys = addr;
+               fw->img = kmalloc(len, GFP_KERNEL);
+               if (!fw->img)
+                       return -ENOMEM;
+
+               memcpy(fw->img, src, fw->len);
+               fw->phys = dma_map_single(fw->device->dev, fw->img, len, DMA_TO_DEVICE);
+               if (dma_mapping_error(fw->device->dev, fw->phys)) {
+                       kfree(fw->img);
+                       return -EFAULT;
                }
 
                sg_init_one(&fw->mem.sgl, fw->img, len);
index 9858c1438aa7feda7d84ff5442f611b23f101b2d..abe41f7a34045531e90ca98b74ab03aedf5538c5 100644 (file)
@@ -1624,7 +1624,7 @@ r535_gsp_wpr_meta_init(struct nvkm_gsp *gsp)
        meta->magic = GSP_FW_WPR_META_MAGIC;
        meta->revision = GSP_FW_WPR_META_REVISION;
 
-       meta->sysmemAddrOfRadix3Elf = gsp->radix3.mem[0].addr;
+       meta->sysmemAddrOfRadix3Elf = gsp->radix3.lvl0.addr;
        meta->sizeOfRadix3Elf = gsp->fb.wpr2.elf.size;
 
        meta->sysmemAddrOfBootloader = gsp->boot.fw.addr;
@@ -1919,8 +1919,9 @@ nvkm_gsp_sg(struct nvkm_device *device, u64 size, struct sg_table *sgt)
 static void
 nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
 {
-       for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--)
-               nvkm_gsp_mem_dtor(gsp, &rx3->mem[i]);
+       nvkm_gsp_sg_free(gsp->subdev.device, &rx3->lvl2);
+       nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+       nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
 }
 
 /**
@@ -1960,36 +1961,60 @@ static int
 nvkm_gsp_radix3_sg(struct nvkm_gsp *gsp, struct sg_table *sgt, u64 size,
                   struct nvkm_gsp_radix3 *rx3)
 {
-       u64 addr;
+       struct sg_dma_page_iter sg_dma_iter;
+       struct scatterlist *sg;
+       size_t bufsize;
+       u64 *pte;
+       int ret, i, page_idx = 0;
 
-       for (int i = ARRAY_SIZE(rx3->mem) - 1; i >= 0; i--) {
-               u64 *ptes;
-               size_t bufsize;
-               int ret, idx;
+       ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl0);
+       if (ret)
+               return ret;
 
-               bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
-               ret = nvkm_gsp_mem_ctor(gsp, bufsize, &rx3->mem[i]);
-               if (ret)
-                       return ret;
+       ret = nvkm_gsp_mem_ctor(gsp, GSP_PAGE_SIZE, &rx3->lvl1);
+       if (ret)
+               goto lvl1_fail;
 
-               ptes = rx3->mem[i].data;
-               if (i == 2) {
-                       struct scatterlist *sgl;
+       // Allocate level 2
+       bufsize = ALIGN((size / GSP_PAGE_SIZE) * sizeof(u64), GSP_PAGE_SIZE);
+       ret = nvkm_gsp_sg(gsp->subdev.device, bufsize, &rx3->lvl2);
+       if (ret)
+               goto lvl2_fail;
 
-                       for_each_sgtable_dma_sg(sgt, sgl, idx) {
-                               for (int j = 0; j < sg_dma_len(sgl) / GSP_PAGE_SIZE; j++)
-                                       *ptes++ = sg_dma_address(sgl) + (GSP_PAGE_SIZE * j);
-                       }
-               } else {
-                       for (int j = 0; j < size / GSP_PAGE_SIZE; j++)
-                               *ptes++ = addr + GSP_PAGE_SIZE * j;
+       // Write the bus address of level 1 to level 0
+       pte = rx3->lvl0.data;
+       *pte = rx3->lvl1.addr;
+
+       // Write the bus address of each page in level 2 to level 1
+       pte = rx3->lvl1.data;
+       for_each_sgtable_dma_page(&rx3->lvl2, &sg_dma_iter, 0)
+               *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+
+       // Finally, write the bus address of each page in sgt to level 2
+       for_each_sgtable_sg(&rx3->lvl2, sg, i) {
+               void *sgl_end;
+
+               pte = sg_virt(sg);
+               sgl_end = (void *)pte + sg->length;
+
+               for_each_sgtable_dma_page(sgt, &sg_dma_iter, page_idx) {
+                       *pte++ = sg_page_iter_dma_address(&sg_dma_iter);
+                       page_idx++;
+
+                       // Go to the next scatterlist for level 2 if we've reached the end
+                       if ((void *)pte >= sgl_end)
+                               break;
                }
+       }
 
-               size = rx3->mem[i].size;
-               addr = rx3->mem[i].addr;
+       if (ret) {
+lvl2_fail:
+               nvkm_gsp_mem_dtor(gsp, &rx3->lvl1);
+lvl1_fail:
+               nvkm_gsp_mem_dtor(gsp, &rx3->lvl0);
        }
 
-       return 0;
+       return ret;
 }
 
 int
@@ -2021,7 +2046,7 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend)
                sr = gsp->sr.meta.data;
                sr->magic = GSP_FW_SR_META_MAGIC;
                sr->revision = GSP_FW_SR_META_REVISION;
-               sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.mem[0].addr;
+               sr->sysmemAddrOfSuspendResumeData = gsp->sr.radix3.lvl0.addr;
                sr->sizeOfSuspendResumeData = len;
 
                mbox0 = lower_32_bits(gsp->sr.meta.addr);
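The restructured radix3 above replaces the old uniform mem[3] array with explicitly named levels. As a descriptive sketch of the resulting layout (field names follow struct nvkm_gsp_radix3 from the header change earlier; the rest is a summary, not code taken from the driver):

/*
 * lvl0: one GSP page whose single PTE holds the DMA address of lvl1.
 * lvl1: one GSP page with one PTE per page of the lvl2 scatterlist.
 * lvl2: a scatterlist large enough to hold one PTE per GSP-sized page
 *       of the target buffer described by sgt (the firmware image).
 *
 * GSP resolves a page by walking lvl0 -> lvl1 -> lvl2, so only
 * radix3.lvl0.addr has to be handed over in the WPR/SR metadata.
 */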
index d037b3b8b9993458a8f1615902363a6663a94052..5b15d0294836756a1e3cfced716b86cd4e093e76 100644 (file)
@@ -177,7 +177,7 @@ config DRM_PANEL_ILITEK_IL9322
 
 config DRM_PANEL_ILITEK_ILI9341
        tristate "Ilitek ILI9341 240x320 QVGA panels"
-       depends on OF && SPI
+       depends on SPI
        select DRM_KMS_HELPER
        select DRM_GEM_DMA_HELPER
        depends on BACKLIGHT_CLASS_DEVICE
index 3574681891e816f5f32a012814e22f7335f687d2..b933380b7eb783fad1d1101b61d7a630ade11315 100644 (file)
@@ -22,8 +22,9 @@
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
 #include <linux/module.h>
-#include <linux/of.h>
+#include <linux/property.h>
 #include <linux/regulator/consumer.h>
 #include <linux/spi/spi.h>
 
@@ -421,7 +422,7 @@ static int ili9341_dpi_prepare(struct drm_panel *panel)
 
        ili9341_dpi_init(ili);
 
-       return ret;
+       return 0;
 }
 
 static int ili9341_dpi_enable(struct drm_panel *panel)
@@ -691,7 +692,7 @@ static int ili9341_dpi_probe(struct spi_device *spi, struct gpio_desc *dc,
         * Every new incarnation of this display must have a unique
         * data entry for the system in this driver.
         */
-       ili->conf = of_device_get_match_data(dev);
+       ili->conf = device_get_match_data(dev);
        if (!ili->conf) {
                dev_err(dev, "missing device configuration\n");
                return -ENODEV;
@@ -714,18 +715,18 @@ static int ili9341_probe(struct spi_device *spi)
 
        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(reset))
-               dev_err(dev, "Failed to get gpio 'reset'\n");
+               return dev_err_probe(dev, PTR_ERR(reset), "Failed to get gpio 'reset'\n");
 
        dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
        if (IS_ERR(dc))
-               dev_err(dev, "Failed to get gpio 'dc'\n");
+               return dev_err_probe(dev, PTR_ERR(dc), "Failed to get gpio 'dc'\n");
 
        if (!strcmp(id->name, "sf-tc240t-9370-t"))
                return ili9341_dpi_probe(spi, dc, reset);
        else if (!strcmp(id->name, "yx240qv29"))
                return ili9341_dbi_probe(spi, dc, reset);
 
-       return -1;
+       return -ENODEV;
 }
 
 static void ili9341_remove(struct spi_device *spi)
index 578a7c37f00bd7a3c8a5f1d5e93d4cd80fdc6cc9..d776e3f87064fa2e86387fb69a0570259ac95fe7 100644 (file)
@@ -92,7 +92,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
         */
        if (bdev->pool.use_dma_alloc && cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) {
                page_flags |= TTM_TT_FLAG_DECRYPTED;
-               drm_info(ddev, "TT memory decryption enabled.");
+               drm_info_once(ddev, "TT memory decryption enabled.");
        }
 
        bo->ttm = bdev->funcs->ttm_tt_create(bo, page_flags);
index e5eb21a471a6010aa956c811522956f27b99a096..00144632c600ebb6a63ab80c55eae90eed95375f 100644 (file)
@@ -204,6 +204,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
                             VMW_BO_DOMAIN_VRAM,
                             VMW_BO_DOMAIN_VRAM);
        buf->places[0].lpfn = PFN_UP(bo->resource->size);
+       buf->busy_places[0].lpfn = PFN_UP(bo->resource->size);
        ret = ttm_bo_validate(bo, &buf->placement, &ctx);
 
        /* For some reason we didn't end up at the start of vram */
index 2a0cda324703147ef36ff83cd891c70c3395c11d..5efc6a766f64e467a68223376b2a97aa62b278e6 100644 (file)
@@ -991,7 +991,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
        }
 
        event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
-       event->event.base.length = sizeof(*event);
+       event->event.base.length = sizeof(event->event);
        event->event.user_data = user_data;
 
        ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
index 420eba0e4be00b8d241bfe2d344837b262095785..854a7bb53567495a4cade766f62c985e8106a70d 100644 (file)
@@ -84,7 +84,8 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
 #define IS_ROCKETLAKE(dev_priv)        IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
 #define IS_DG1(dev_priv)        IS_PLATFORM(dev_priv, XE_DG1)
 #define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
-#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
+#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \
+                                 IS_PLATFORM(dev_priv, XE_ALDERLAKE_N))
 #define IS_XEHPSDV(dev_priv) (dev_priv && 0)
 #define IS_DG2(dev_priv)       IS_PLATFORM(dev_priv, XE_DG2)
 #define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
index 3d4c8f342e215ed39263ba5c4c01079072dfcbbd..32cd0c978aa289efadec2f047dec7dd08b103de3 100644 (file)
@@ -1606,6 +1606,9 @@ static void vm_destroy_work_func(struct work_struct *w)
        /* xe_vm_close_and_put was not called? */
        xe_assert(xe, !vm->size);
 
+       if (xe_vm_in_preempt_fence_mode(vm))
+               flush_work(&vm->preempt.rebind_work);
+
        mutex_destroy(&vm->snap_mutex);
 
        if (!(vm->flags & XE_VM_FLAG_MIGRATION))
index 59b5dd0e2f41d2a8751a4f5139e39302acb2b7bd..14daf432f30b5c9f01502d4d2b44909930c8e75a 100644 (file)
@@ -5705,7 +5705,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6141,
                .family = MV88E6XXX_FAMILY_6341,
                .name = "Marvell 88E6141",
-               .num_databases = 4096,
+               .num_databases = 256,
                .num_macs = 2048,
                .num_ports = 6,
                .num_internal_phys = 5,
@@ -6164,7 +6164,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
                .prod_num = MV88E6XXX_PORT_SWITCH_ID_PROD_6341,
                .family = MV88E6XXX_FAMILY_6341,
                .name = "Marvell 88E6341",
-               .num_databases = 4096,
+               .num_databases = 256,
                .num_macs = 2048,
                .num_internal_phys = 5,
                .num_ports = 6,
index b1f84b37032a7833d7e4f3d045e8755ace6f79d3..c7e7dac057a336d086bdf9569286fadec7e1d3be 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) controller driver
  *
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
  */
 
 #define pr_fmt(fmt)                            "bcmgenet: " fmt
@@ -2467,14 +2467,18 @@ static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
 {
        u32 reg;
 
+       spin_lock_bh(&priv->reg_lock);
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       if (reg & CMD_SW_RESET)
+       if (reg & CMD_SW_RESET) {
+               spin_unlock_bh(&priv->reg_lock);
                return;
+       }
        if (enable)
                reg |= mask;
        else
                reg &= ~mask;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       spin_unlock_bh(&priv->reg_lock);
 
        /* UniMAC stops on a packet boundary, wait for a full-size packet
         * to be processed
@@ -2490,8 +2494,10 @@ static void reset_umac(struct bcmgenet_priv *priv)
        udelay(10);
 
        /* issue soft reset and disable MAC while updating its registers */
+       spin_lock_bh(&priv->reg_lock);
        bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
        udelay(2);
+       spin_unlock_bh(&priv->reg_lock);
 }
 
 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -3334,7 +3340,9 @@ static void bcmgenet_netif_start(struct net_device *dev)
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
        /* Start the network engine */
+       netif_addr_lock_bh(dev);
        bcmgenet_set_rx_mode(dev);
+       netif_addr_unlock_bh(dev);
        bcmgenet_enable_rx_napi(priv);
 
        umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
@@ -3595,16 +3603,19 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
         * 3. The number of filters needed exceeds the number of filters
         *    supported by the hardware.
        */
+       spin_lock(&priv->reg_lock);
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
            (nfilter > MAX_MDF_FILTER)) {
                reg |= CMD_PROMISC;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+               spin_unlock(&priv->reg_lock);
                bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
                return;
        } else {
                reg &= ~CMD_PROMISC;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+               spin_unlock(&priv->reg_lock);
        }
 
        /* update MDF filter */
@@ -4003,6 +4014,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
                goto err;
        }
 
+       spin_lock_init(&priv->reg_lock);
        spin_lock_init(&priv->lock);
 
        /* Set default pause parameters */
index 7523b60b3c1c016de74fd74b7d1e286596aac1ae..43b923c48b14f40e0cf9cdd6b6f3ac16c301ef97 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
  */
 
 #ifndef __BCMGENET_H__
@@ -573,6 +573,8 @@ struct bcmgenet_rxnfc_rule {
 /* device context */
 struct bcmgenet_priv {
        void __iomem *base;
+       /* reg_lock: lock to serialize access to shared registers */
+       spinlock_t reg_lock;
        enum bcmgenet_version version;
        struct net_device *dev;
 
index 7a41cad5788f4edd6691552901f4b2eb8fdebc36..1248792d7fd4d2f98847889c499277a92229d785 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
  *
- * Copyright (c) 2014-2020 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
  */
 
 #define pr_fmt(fmt)                            "bcmgenet_wol: " fmt
@@ -151,6 +151,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
        }
 
        /* Can't suspend with WoL if MAC is still in reset */
+       spin_lock_bh(&priv->reg_lock);
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        if (reg & CMD_SW_RESET)
                reg &= ~CMD_SW_RESET;
@@ -158,6 +159,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
        /* disable RX */
        reg &= ~CMD_RX_EN;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       spin_unlock_bh(&priv->reg_lock);
        mdelay(10);
 
        if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
@@ -203,6 +205,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
        }
 
        /* Enable CRC forward */
+       spin_lock_bh(&priv->reg_lock);
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        priv->crc_fwd_en = 1;
        reg |= CMD_CRC_FWD;
@@ -210,6 +213,7 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
        /* Receiver must be enabled for WOL MP detection */
        reg |= CMD_RX_EN;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       spin_unlock_bh(&priv->reg_lock);
 
        reg = UMAC_IRQ_MPD_R;
        if (hfb_enable)
@@ -256,7 +260,9 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
        }
 
        /* Disable CRC Forward */
+       spin_lock_bh(&priv->reg_lock);
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        reg &= ~CMD_CRC_FWD;
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       spin_unlock_bh(&priv->reg_lock);
 }
index 9ada89355747554dc458c311f96333866cf7bc66..c4a3698cef66f61d775c783b334a035589d55bdd 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Broadcom GENET MDIO routines
  *
- * Copyright (c) 2014-2017 Broadcom
+ * Copyright (c) 2014-2024 Broadcom
  */
 
 #include <linux/acpi.h>
@@ -76,6 +76,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
        reg |= RGMII_LINK;
        bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
 
+       spin_lock_bh(&priv->reg_lock);
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
                       CMD_HD_EN |
@@ -88,6 +89,7 @@ static void bcmgenet_mac_config(struct net_device *dev)
                reg |= CMD_TX_EN | CMD_RX_EN;
        }
        bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       spin_unlock_bh(&priv->reg_lock);
 
        active = phy_init_eee(phydev, 0) >= 0;
        bcmgenet_eee_enable_set(dev,
@@ -275,6 +277,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
         * block for the interface to work, unconditionally clear the
         * Out-of-band disable since we do not need it.
         */
+       mutex_lock(&phydev->lock);
        reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
        reg &= ~OOB_DISABLE;
        if (priv->ext_phy) {
@@ -286,6 +289,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                        reg |= RGMII_MODE_EN;
        }
        bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+       mutex_unlock(&phydev->lock);
 
        if (init)
                dev_info(kdev, "configuring instance for %s\n", phy_name);
index 7246e13dd559fc37170c791644cf5a8fd8d04fcf..97291bfbeea589e8ca8ab637db8bcff7e573322f 100644 (file)
@@ -312,7 +312,7 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
        void *kern_buf;
 
        /* Copy the user space buf */
-       kern_buf = memdup_user(buf, nbytes);
+       kern_buf = memdup_user_nul(buf, nbytes);
        if (IS_ERR(kern_buf))
                return PTR_ERR(kern_buf);
 
@@ -372,7 +372,7 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
        void *kern_buf;
 
        /* Copy the user space buf */
-       kern_buf = memdup_user(buf, nbytes);
+       kern_buf = memdup_user_nul(buf, nbytes);
        if (IS_ERR(kern_buf))
                return PTR_ERR(kern_buf);
 
index 49d5808b7d11d281796debc6be5f9b3a9b1f60e3..de52bcb884c417098ff5aff6aea77753d055c671 100644 (file)
@@ -2670,12 +2670,12 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
        lb->loopback = 1;
 
        q = &adap->sge.ethtxq[pi->first_qset];
-       __netif_tx_lock(q->txq, smp_processor_id());
+       __netif_tx_lock_bh(q->txq);
 
        reclaim_completed_tx(adap, &q->q, -1, true);
        credits = txq_avail(&q->q) - ndesc;
        if (unlikely(credits < 0)) {
-               __netif_tx_unlock(q->txq);
+               __netif_tx_unlock_bh(q->txq);
                return -ENOMEM;
        }
 
@@ -2710,7 +2710,7 @@ int cxgb4_selftest_lb_pkt(struct net_device *netdev)
        init_completion(&lb->completion);
        txq_advance(&q->q, ndesc);
        cxgb4_ring_tx_db(adap, &q->q, ndesc);
-       __netif_tx_unlock(q->txq);
+       __netif_tx_unlock_bh(q->txq);
 
        /* wait for the pkt to return */
        ret = wait_for_completion_timeout(&lb->completion, 10 * HZ);
index 93544f1cc2a51be0c84c33391211bf47d2675edb..f7ae0e0aa4a4f52d7ff720c11fb906cac00f7c5a 100644 (file)
@@ -157,7 +157,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
                 * the lower time out
                 */
                for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-                       usleep_range(50, 60);
+                       udelay(50);
                        mdic = er32(MDIC);
                        if (mdic & E1000_MDIC_READY)
                                break;
@@ -181,7 +181,7 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
                 * reading duplicate data in the next MDIC transaction.
                 */
                if (hw->mac.type == e1000_pch2lan)
-                       usleep_range(100, 150);
+                       udelay(100);
 
                if (success) {
                        *data = (u16)mdic;
@@ -237,7 +237,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
                 * the lower time out
                 */
                for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
-                       usleep_range(50, 60);
+                       udelay(50);
                        mdic = er32(MDIC);
                        if (mdic & E1000_MDIC_READY)
                                break;
@@ -261,7 +261,7 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
                 * reading duplicate data in the next MDIC transaction.
                 */
                if (hw->mac.type == e1000_pch2lan)
-                       usleep_range(100, 150);
+                       udelay(100);
 
                if (success)
                        return 0;
index d252d98218d084e9782d899d4f854c0bf7101622..9fc0fd95a13d8f2c40475a94f38d3a2c088ab6f4 100644 (file)
@@ -171,7 +171,7 @@ ice_debugfs_module_write(struct file *filp, const char __user *buf,
        if (*ppos != 0 || count > 8)
                return -EINVAL;
 
-       cmd_buf = memdup_user(buf, count);
+       cmd_buf = memdup_user_nul(buf, count);
        if (IS_ERR(cmd_buf))
                return PTR_ERR(cmd_buf);
 
@@ -257,7 +257,7 @@ ice_debugfs_nr_messages_write(struct file *filp, const char __user *buf,
        if (*ppos != 0 || count > 4)
                return -EINVAL;
 
-       cmd_buf = memdup_user(buf, count);
+       cmd_buf = memdup_user_nul(buf, count);
        if (IS_ERR(cmd_buf))
                return PTR_ERR(cmd_buf);
 
@@ -332,7 +332,7 @@ ice_debugfs_enable_write(struct file *filp, const char __user *buf,
        if (*ppos != 0 || count > 2)
                return -EINVAL;
 
-       cmd_buf = memdup_user(buf, count);
+       cmd_buf = memdup_user_nul(buf, count);
        if (IS_ERR(cmd_buf))
                return PTR_ERR(cmd_buf);
 
@@ -428,7 +428,7 @@ ice_debugfs_log_size_write(struct file *filp, const char __user *buf,
        if (*ppos != 0 || count > 5)
                return -EINVAL;
 
-       cmd_buf = memdup_user(buf, count);
+       cmd_buf = memdup_user_nul(buf, count);
        if (IS_ERR(cmd_buf))
                return PTR_ERR(cmd_buf);
 
index 2500f5ba4f5a42b43ee5de8d64d7f06fb70cdd87..881d704644fbee77cf5c9f7137d539463c08c2dd 100644 (file)
@@ -999,12 +999,10 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
        u16 pcifunc;
        int ret, lf;
 
-       cmd_buf = memdup_user(buffer, count + 1);
+       cmd_buf = memdup_user_nul(buffer, count);
        if (IS_ERR(cmd_buf))
                return -ENOMEM;
 
-       cmd_buf[count] = '\0';
-
        cmd_buf_tmp = strchr(cmd_buf, '\n');
        if (cmd_buf_tmp) {
                *cmd_buf_tmp = '\0';
index a5ac21a0ee33ff01e8bcdcf5757a76a863d6543b..cb6b33a228ea2063edd4f795a1f3368d1fbc483a 100644 (file)
@@ -1868,8 +1868,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
                            struct flow_cls_offload *f)
 {
        struct qede_arfs_fltr_node *n;
-       int min_hlen, rc = -EINVAL;
        struct qede_arfs_tuple t;
+       int min_hlen, rc;
 
        __qede_lock(edev);
 
@@ -1879,7 +1879,8 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
        }
 
        /* parse flower attribute and prepare filter */
-       if (qede_parse_flow_attr(edev, proto, f->rule, &t))
+       rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
+       if (rc)
                goto unlock;
 
        /* Validate profile mode and number of filters */
@@ -1888,11 +1889,13 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
                DP_NOTICE(edev,
                          "Filter configuration invalidated, filter mode=0x%x, configured mode=0x%x, filter count=0x%x\n",
                          t.mode, edev->arfs->mode, edev->arfs->filter_count);
+               rc = -EINVAL;
                goto unlock;
        }
 
        /* parse tc actions and get the vf_id */
-       if (qede_parse_actions(edev, &f->rule->action, f->common.extack))
+       rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
+       if (rc)
                goto unlock;
 
        if (qede_flow_find_fltr(edev, &t)) {
@@ -1998,10 +2001,9 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
        if (IS_ERR(flow))
                return PTR_ERR(flow);
 
-       if (qede_parse_flow_attr(edev, proto, flow->rule, t)) {
-               err = -EINVAL;
+       err = qede_parse_flow_attr(edev, proto, flow->rule, t);
+       if (err)
                goto err_out;
-       }
 
        /* Make sure location is valid and filter isn't already set */
        err = qede_flow_spec_validate(edev, &flow->rule->action, t,
index ba319fc219571975597bf7aad3d913e77dac6898..3a9148fb1422ba5bedcbfe368fb611fc60732f66 100644 (file)
@@ -1674,6 +1674,7 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
        bool raw_proto = false;
        void *oiph;
        __be32 vni = 0;
+       int nh;
 
        /* Need UDP and VXLAN header to be present */
        if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1762,12 +1763,28 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
                skb->pkt_type = PACKET_HOST;
        }
 
-       oiph = skb_network_header(skb);
+       /* Save offset of outer header relative to skb->head,
+        * because we are going to reset the network header to the inner header
+        * and might change skb->head.
+        */
+       nh = skb_network_header(skb) - skb->head;
+
        skb_reset_network_header(skb);
 
+       if (!pskb_inet_may_pull(skb)) {
+               DEV_STATS_INC(vxlan->dev, rx_length_errors);
+               DEV_STATS_INC(vxlan->dev, rx_errors);
+               vxlan_vnifilter_count(vxlan, vni, vninode,
+                                     VXLAN_VNI_STATS_RX_ERRORS, 0);
+               goto drop;
+       }
+
+       /* Get the outer header. */
+       oiph = skb->head + nh;
+
        if (!vxlan_ecn_decapsulate(vs, oiph, skb)) {
-               ++vxlan->dev->stats.rx_frame_errors;
-               ++vxlan->dev->stats.rx_errors;
+               DEV_STATS_INC(vxlan->dev, rx_frame_errors);
+               DEV_STATS_INC(vxlan->dev, rx_errors);
                vxlan_vnifilter_count(vxlan, vni, vninode,
                                      VXLAN_VNI_STATS_RX_ERRORS, 0);
                goto drop;
@@ -1837,7 +1854,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
                goto out;
 
        if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
-               dev->stats.tx_dropped++;
+               dev_core_stats_tx_dropped_inc(dev);
+               vxlan_vnifilter_count(vxlan, vni, NULL,
+                                     VXLAN_VNI_STATS_TX_DROPS, 0);
                goto out;
        }
        parp = arp_hdr(skb);
@@ -1893,7 +1912,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
                reply->pkt_type = PACKET_HOST;
 
                if (netif_rx(reply) == NET_RX_DROP) {
-                       dev->stats.rx_dropped++;
+                       dev_core_stats_rx_dropped_inc(dev);
                        vxlan_vnifilter_count(vxlan, vni, NULL,
                                              VXLAN_VNI_STATS_RX_DROPS, 0);
                }
@@ -2052,7 +2071,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni)
                        goto out;
 
                if (netif_rx(reply) == NET_RX_DROP) {
-                       dev->stats.rx_dropped++;
+                       dev_core_stats_rx_dropped_inc(dev);
                        vxlan_vnifilter_count(vxlan, vni, NULL,
                                              VXLAN_VNI_STATS_RX_DROPS, 0);
                }
@@ -2263,7 +2282,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
                                      len);
        } else {
 drop:
-               dev->stats.rx_dropped++;
+               dev_core_stats_rx_dropped_inc(dev);
                vxlan_vnifilter_count(dst_vxlan, vni, NULL,
                                      VXLAN_VNI_STATS_RX_DROPS, 0);
        }
@@ -2295,7 +2314,7 @@ static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
                                           addr_family, dst_port,
                                           vxlan->cfg.flags);
                if (!dst_vxlan) {
-                       dev->stats.tx_errors++;
+                       DEV_STATS_INC(dev, tx_errors);
                        vxlan_vnifilter_count(vxlan, vni, NULL,
                                              VXLAN_VNI_STATS_TX_ERRORS, 0);
                        kfree_skb(skb);
@@ -2559,7 +2578,7 @@ out_unlock:
        return;
 
 drop:
-       dev->stats.tx_dropped++;
+       dev_core_stats_tx_dropped_inc(dev);
        vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_DROPS, 0);
        dev_kfree_skb(skb);
        return;
@@ -2567,11 +2586,11 @@ drop:
 tx_error:
        rcu_read_unlock();
        if (err == -ELOOP)
-               dev->stats.collisions++;
+               DEV_STATS_INC(dev, collisions);
        else if (err == -ENETUNREACH)
-               dev->stats.tx_carrier_errors++;
+               DEV_STATS_INC(dev, tx_carrier_errors);
        dst_release(ndst);
-       dev->stats.tx_errors++;
+       DEV_STATS_INC(dev, tx_errors);
        vxlan_vnifilter_count(vxlan, vni, NULL, VXLAN_VNI_STATS_TX_ERRORS, 0);
        kfree_skb(skb);
 }
@@ -2604,7 +2623,7 @@ static void vxlan_xmit_nh(struct sk_buff *skb, struct net_device *dev,
        return;
 
 drop:
-       dev->stats.tx_dropped++;
+       dev_core_stats_tx_dropped_inc(dev);
        vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
                              VXLAN_VNI_STATS_TX_DROPS, 0);
        dev_kfree_skb(skb);
@@ -2642,7 +2661,7 @@ static netdev_tx_t vxlan_xmit_nhid(struct sk_buff *skb, struct net_device *dev,
        return NETDEV_TX_OK;
 
 drop:
-       dev->stats.tx_dropped++;
+       dev_core_stats_tx_dropped_inc(dev);
        vxlan_vnifilter_count(netdev_priv(dev), vni, NULL,
                              VXLAN_VNI_STATS_TX_DROPS, 0);
        dev_kfree_skb(skb);
@@ -2739,7 +2758,7 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
                            !is_multicast_ether_addr(eth->h_dest))
                                vxlan_fdb_miss(vxlan, eth->h_dest);
 
-                       dev->stats.tx_dropped++;
+                       dev_core_stats_tx_dropped_inc(dev);
                        vxlan_vnifilter_count(vxlan, vni, NULL,
                                              VXLAN_VNI_STATS_TX_DROPS, 0);
                        kfree_skb(skb);
index 27281a9a8951dbd53f30a27a14e0ac0be9a35c5d..095f59e7aa937aa3ae9d8e3fc3f1a25608c99f25 100644 (file)
@@ -628,27 +628,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
 }
 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state);
 
-/*
- * Returns true for sink states that can't ever transition back to live.
- */
-static bool nvme_state_terminal(struct nvme_ctrl *ctrl)
-{
-       switch (nvme_ctrl_state(ctrl)) {
-       case NVME_CTRL_NEW:
-       case NVME_CTRL_LIVE:
-       case NVME_CTRL_RESETTING:
-       case NVME_CTRL_CONNECTING:
-               return false;
-       case NVME_CTRL_DELETING:
-       case NVME_CTRL_DELETING_NOIO:
-       case NVME_CTRL_DEAD:
-               return true;
-       default:
-               WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
-               return true;
-       }
-}
-
 /*
  * Waits for the controller state to be resetting, or returns false if it is
  * not possible to ever transition to that state.
@@ -3681,7 +3660,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, struct nvme_ns_info *info)
                                "Found shared namespace %d, but multipathing not supported.\n",
                                info->nsid);
                        dev_warn_once(ctrl->device,
-                               "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0\n.");
+                               "Support for shared namespaces without CONFIG_NVME_MULTIPATH is deprecated and will be removed in Linux 6.0.\n");
                }
        }
 
index 5397fb428b242cea36f8c36997e134f6463e9f39..d16e976ae1a4732efef32a9da1da88be6567f621 100644 (file)
@@ -247,7 +247,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
                if (nvme_path_is_disabled(ns))
                        continue;
 
-               if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+               if (ns->ctrl->numa_node != NUMA_NO_NODE &&
+                   READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
                        distance = node_distance(node, ns->ctrl->numa_node);
                else
                        distance = LOCAL_DISTANCE;
index d0ed64dc7380e51577bc6ece92db1d0a273905a7..b564c5f1450c6a506302c482882d2be31a8eb5c6 100644 (file)
@@ -741,6 +741,27 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
                nvme_tag_from_cid(command_id) >= NVME_AQ_BLK_MQ_DEPTH;
 }
 
+/*
+ * Returns true for sink states that can't ever transition back to live.
+ */
+static inline bool nvme_state_terminal(struct nvme_ctrl *ctrl)
+{
+       switch (nvme_ctrl_state(ctrl)) {
+       case NVME_CTRL_NEW:
+       case NVME_CTRL_LIVE:
+       case NVME_CTRL_RESETTING:
+       case NVME_CTRL_CONNECTING:
+               return false;
+       case NVME_CTRL_DELETING:
+       case NVME_CTRL_DELETING_NOIO:
+       case NVME_CTRL_DEAD:
+               return true;
+       default:
+               WARN_ONCE(1, "Unhandled ctrl state:%d", ctrl->state);
+               return true;
+       }
+}
+
 void nvme_complete_rq(struct request *req);
 void nvme_complete_batch_req(struct request *req);
 
index 8e0bb9692685d4638bd21dad44fe1fbbbb147a77..e393f6947ce493f8674a7bb971dc4d4abae192ca 100644 (file)
@@ -1286,6 +1286,9 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
        u32 csts = readl(dev->bar + NVME_REG_CSTS);
        u8 opcode;
 
+       if (nvme_state_terminal(&dev->ctrl))
+               goto disable;
+
        /* If PCI error recovery process is happening, we cannot reset or
         * the recovery mechanism will surely fail.
         */
@@ -1390,8 +1393,11 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req)
        return BLK_EH_RESET_TIMER;
 
 disable:
-       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING))
+       if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RESETTING)) {
+               if (nvme_state_terminal(&dev->ctrl))
+                       nvme_dev_disable(dev, true);
                return BLK_EH_DONE;
+       }
 
        nvme_dev_disable(dev, false);
        if (nvme_try_sched_reset(&dev->ctrl))
index fdbcdcedcee99f064cc7258d22b7fe737d285eda..28bc2f373cfa0e715040a7d4f5a18db4690ca5dc 100644 (file)
@@ -360,12 +360,18 @@ static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
        } while (ret > 0);
 }
 
-static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+static inline bool nvme_tcp_queue_has_pending(struct nvme_tcp_queue *queue)
 {
        return !list_empty(&queue->send_list) ||
                !llist_empty(&queue->req_list);
 }
 
+static inline bool nvme_tcp_queue_more(struct nvme_tcp_queue *queue)
+{
+       return !nvme_tcp_tls(&queue->ctrl->ctrl) &&
+               nvme_tcp_queue_has_pending(queue);
+}
+
 static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                bool sync, bool last)
 {
@@ -386,7 +392,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
                mutex_unlock(&queue->send_mutex);
        }
 
-       if (last && nvme_tcp_queue_more(queue))
+       if (last && nvme_tcp_queue_has_pending(queue))
                queue_work_on(queue->io_cpu, nvme_tcp_wq, &queue->io_work);
 }
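
The rename above splits "does the queue have queued requests" from "may the current context keep sending": the diff keeps the has_pending check for deciding when to kick io_work, while queue_more additionally requires that the connection not use TLS. A toy sketch of that two-predicate split is shown below; the queue type and tls flag are illustrative stand-ins, not the driver's structures.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_queue {
	size_t nr_queued;	/* stand-in for send_list + req_list */
	bool tls;		/* stand-in for nvme_tcp_tls() on the controller */
};

/* "Is anything queued?" - used to decide whether to schedule the worker. */
static bool queue_has_pending(const struct toy_queue *q)
{
	return q->nr_queued > 0;
}

/* "May the current context keep sending?" - never true with TLS. */
static bool queue_more(const struct toy_queue *q)
{
	return !q->tls && queue_has_pending(q);
}

int main(void)
{
	struct toy_queue q = { .nr_queued = 1, .tls = true };

	/* Work still needs to be scheduled, but no further inline sends. */
	printf("pending=%d more=%d\n", queue_has_pending(&q), queue_more(&q));
	return 0;
}
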
 
index 3ddbc3880cac8d327074fe8f923b9f257979c8bd..fb518b00f71f6034a74be5746fc30b5bb090b972 100644 (file)
@@ -285,9 +285,9 @@ int nvmet_auth_host_hash(struct nvmet_req *req, u8 *response,
        }
 
        if (shash_len != crypto_shash_digestsize(shash_tfm)) {
-               pr_debug("%s: hash len mismatch (len %d digest %d)\n",
-                        __func__, shash_len,
-                        crypto_shash_digestsize(shash_tfm));
+               pr_err("%s: hash len mismatch (len %d digest %d)\n",
+                       __func__, shash_len,
+                       crypto_shash_digestsize(shash_tfm));
                ret = -EINVAL;
                goto out_free_tfm;
        }
@@ -370,7 +370,7 @@ out_free_response:
        nvme_auth_free_key(transformed_key);
 out_free_tfm:
        crypto_free_shash(shash_tfm);
-       return 0;
+       return ret;
 }
 
 int nvmet_auth_ctrl_hash(struct nvmet_req *req, u8 *response,
index a2325330bf22145202837aa5cf89d9ec6543ab59..89a001101a7fca08bf047405aac92704bdd9a4c9 100644 (file)
@@ -754,6 +754,19 @@ static struct configfs_attribute *nvmet_ns_attrs[] = {
        NULL,
 };
 
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid)
+{
+       struct config_item *ns_item;
+       char name[4] = {};
+
+       if (sprintf(name, "%u", nsid) <= 0)
+               return false;
+       mutex_lock(&subsys->namespaces_group.cg_subsys->su_mutex);
+       ns_item = config_group_find_item(&subsys->namespaces_group, name);
+       mutex_unlock(&subsys->namespaces_group.cg_subsys->su_mutex);
+       return ns_item != NULL;
+}
+
 static void nvmet_ns_release(struct config_item *item)
 {
        struct nvmet_ns *ns = to_nvmet_ns(item);
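
nvmet_subsys_nsid_exists() distinguishes "namespace configured but currently disabled" from "namespace does not exist" by looking for the configfs item named after the nsid. The sketch below shows the same name-based lookup in user space; it searches a plain string array instead of a configfs group, and sizes its buffer for any 32-bit decimal value, so everything here is illustrative rather than the kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for config_group_find_item(): linear search by name. */
static bool group_has_item(const char *const *names, size_t n, const char *name)
{
	for (size_t i = 0; i < n; i++)
		if (strcmp(names[i], name) == 0)
			return true;
	return false;
}

static bool nsid_exists(const char *const *names, size_t n, uint32_t nsid)
{
	char name[12];	/* up to 10 decimal digits for a u32, plus NUL */

	snprintf(name, sizeof(name), "%u", nsid);
	return group_has_item(names, n, name);
}

int main(void)
{
	const char *const configured[] = { "1", "7", "42" };

	printf("%d %d\n", nsid_exists(configured, 3, 7),
	       nsid_exists(configured, 3, 9));
	return 0;	/* prints "1 0" */
}
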
index 8860a3eb71ec891e948a34060f34b4b148553418..e06013c5dace94d8b91960451b868fe6eaac2137 100644 (file)
@@ -437,10 +437,13 @@ void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 u16 nvmet_req_find_ns(struct nvmet_req *req)
 {
        u32 nsid = le32_to_cpu(req->cmd->common.nsid);
+       struct nvmet_subsys *subsys = nvmet_req_subsys(req);
 
-       req->ns = xa_load(&nvmet_req_subsys(req)->namespaces, nsid);
+       req->ns = xa_load(&subsys->namespaces, nsid);
        if (unlikely(!req->ns)) {
                req->error_loc = offsetof(struct nvme_common_command, nsid);
+               if (nvmet_subsys_nsid_exists(subsys, nsid))
+                       return NVME_SC_INTERNAL_PATH_ERROR;
                return NVME_SC_INVALID_NS | NVME_SC_DNR;
        }
 
index f460728e1df1fd87e24969e782203c7684a23326..c1306de1f4ddf650c2ee66167559f27e4004e837 100644 (file)
@@ -543,6 +543,7 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
                struct nvmet_host *host);
 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
                u8 event_info, u8 log_page);
+bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
 
 #define NVMET_MIN_QUEUE_SIZE   16
 #define NVMET_MAX_QUEUE_SIZE   1024
index a5422e2c979addca1f219777b47f61f5817e302a..380f22ee3ebba3ef5fe50108b39ea78345c1f1ba 100644 (file)
@@ -348,6 +348,7 @@ static int nvmet_tcp_check_ddgst(struct nvmet_tcp_queue *queue, void *pdu)
        return 0;
 }
 
+/* If cmd buffers are NULL, no operation is performed */
 static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
 {
        kfree(cmd->iov);
@@ -1581,13 +1582,9 @@ static void nvmet_tcp_free_cmd_data_in_buffers(struct nvmet_tcp_queue *queue)
        struct nvmet_tcp_cmd *cmd = queue->cmds;
        int i;
 
-       for (i = 0; i < queue->nr_cmds; i++, cmd++) {
-               if (nvmet_tcp_need_data_in(cmd))
-                       nvmet_tcp_free_cmd_buffers(cmd);
-       }
-
-       if (!queue->nr_cmds && nvmet_tcp_need_data_in(&queue->connect))
-               nvmet_tcp_free_cmd_buffers(&queue->connect);
+       for (i = 0; i < queue->nr_cmds; i++, cmd++)
+               nvmet_tcp_free_cmd_buffers(cmd);
+       nvmet_tcp_free_cmd_buffers(&queue->connect);
 }
 
 static void nvmet_tcp_release_queue_work(struct work_struct *w)
index d376fa7114d1a024d63eba168a6809f0816acfba..029efe16f8cc29d04942ef4f420c37a25dc2f4b3 100644 (file)
@@ -43,7 +43,7 @@
 #define SCU614         0x614 /* Disable GPIO Internal Pull-Down #1 */
 #define SCU618         0x618 /* Disable GPIO Internal Pull-Down #2 */
 #define SCU61C         0x61c /* Disable GPIO Internal Pull-Down #3 */
-#define SCU620         0x620 /* Disable GPIO Internal Pull-Down #4 */
+#define SCU630         0x630 /* Disable GPIO Internal Pull-Down #4 */
 #define SCU634         0x634 /* Disable GPIO Internal Pull-Down #5 */
 #define SCU638         0x638 /* Disable GPIO Internal Pull-Down #6 */
 #define SCU690         0x690 /* Multi-function Pin Control #24 */
@@ -2495,38 +2495,38 @@ static struct aspeed_pin_config aspeed_g6_configs[] = {
        ASPEED_PULL_DOWN_PINCONF(D14, SCU61C, 0),
 
        /* GPIOS7 */
-       ASPEED_PULL_DOWN_PINCONF(T24, SCU620, 23),
+       ASPEED_PULL_DOWN_PINCONF(T24, SCU630, 23),
        /* GPIOS6 */
-       ASPEED_PULL_DOWN_PINCONF(P23, SCU620, 22),
+       ASPEED_PULL_DOWN_PINCONF(P23, SCU630, 22),
        /* GPIOS5 */
-       ASPEED_PULL_DOWN_PINCONF(P24, SCU620, 21),
+       ASPEED_PULL_DOWN_PINCONF(P24, SCU630, 21),
        /* GPIOS4 */
-       ASPEED_PULL_DOWN_PINCONF(R26, SCU620, 20),
+       ASPEED_PULL_DOWN_PINCONF(R26, SCU630, 20),
        /* GPIOS3*/
-       ASPEED_PULL_DOWN_PINCONF(R24, SCU620, 19),
+       ASPEED_PULL_DOWN_PINCONF(R24, SCU630, 19),
        /* GPIOS2 */
-       ASPEED_PULL_DOWN_PINCONF(T26, SCU620, 18),
+       ASPEED_PULL_DOWN_PINCONF(T26, SCU630, 18),
        /* GPIOS1 */
-       ASPEED_PULL_DOWN_PINCONF(T25, SCU620, 17),
+       ASPEED_PULL_DOWN_PINCONF(T25, SCU630, 17),
        /* GPIOS0 */
-       ASPEED_PULL_DOWN_PINCONF(R23, SCU620, 16),
+       ASPEED_PULL_DOWN_PINCONF(R23, SCU630, 16),
 
        /* GPIOR7 */
-       ASPEED_PULL_DOWN_PINCONF(U26, SCU620, 15),
+       ASPEED_PULL_DOWN_PINCONF(U26, SCU630, 15),
        /* GPIOR6 */
-       ASPEED_PULL_DOWN_PINCONF(W26, SCU620, 14),
+       ASPEED_PULL_DOWN_PINCONF(W26, SCU630, 14),
        /* GPIOR5 */
-       ASPEED_PULL_DOWN_PINCONF(T23, SCU620, 13),
+       ASPEED_PULL_DOWN_PINCONF(T23, SCU630, 13),
        /* GPIOR4 */
-       ASPEED_PULL_DOWN_PINCONF(U25, SCU620, 12),
+       ASPEED_PULL_DOWN_PINCONF(U25, SCU630, 12),
        /* GPIOR3*/
-       ASPEED_PULL_DOWN_PINCONF(V26, SCU620, 11),
+       ASPEED_PULL_DOWN_PINCONF(V26, SCU630, 11),
        /* GPIOR2 */
-       ASPEED_PULL_DOWN_PINCONF(V24, SCU620, 10),
+       ASPEED_PULL_DOWN_PINCONF(V24, SCU630, 10),
        /* GPIOR1 */
-       ASPEED_PULL_DOWN_PINCONF(U24, SCU620, 9),
+       ASPEED_PULL_DOWN_PINCONF(U24, SCU630, 9),
        /* GPIOR0 */
-       ASPEED_PULL_DOWN_PINCONF(V25, SCU620, 8),
+       ASPEED_PULL_DOWN_PINCONF(V25, SCU630, 8),
 
        /* GPIOX7 */
        ASPEED_PULL_DOWN_PINCONF(AB10, SCU634, 31),
index 6649357637ff337dc49c4e522287d2366dc36516..cffeb869130ddab486fb2ce00e4c1d5a27575875 100644 (file)
@@ -2124,13 +2124,7 @@ int pinctrl_enable(struct pinctrl_dev *pctldev)
 
        error = pinctrl_claim_hogs(pctldev);
        if (error) {
-               dev_err(pctldev->dev, "could not claim hogs: %i\n",
-                       error);
-               pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
-                                     pctldev->desc->npins);
-               mutex_destroy(&pctldev->mutex);
-               kfree(pctldev);
-
+               dev_err(pctldev->dev, "could not claim hogs: %i\n", error);
                return error;
        }
 
index df1efc2e5202591d5f9f23d95f6008853b91e046..6a94ecd6a8deaeb6b7fc14c6387987f52ead306a 100644 (file)
@@ -220,14 +220,16 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev)
        for (state = 0; ; state++) {
                /* Retrieve the pinctrl-* property */
                propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state);
-               if (!propname)
-                       return -ENOMEM;
+               if (!propname) {
+                       ret = -ENOMEM;
+                       goto err;
+               }
                prop = of_find_property(np, propname, &size);
                kfree(propname);
                if (!prop) {
                        if (state == 0) {
-                               of_node_put(np);
-                               return -ENODEV;
+                               ret = -ENODEV;
+                               goto err;
                        }
                        break;
                }
index ac97724c59bae9705e89c93246ec3f09580d663d..4e87f5b875c0e8f7089b9411c9b1610eaa2a8b9a 100644 (file)
@@ -231,6 +231,7 @@ static const unsigned int byt_score_pins_map[BYT_NGPIO_SCORE] = {
 /* SCORE groups */
 static const unsigned int byt_score_uart1_pins[] = { 70, 71, 72, 73 };
 static const unsigned int byt_score_uart2_pins[] = { 74, 75, 76, 77 };
+static const unsigned int byt_score_uart3_pins[] = { 57, 61 };
 
 static const unsigned int byt_score_pwm0_pins[] = { 94 };
 static const unsigned int byt_score_pwm1_pins[] = { 95 };
@@ -278,37 +279,38 @@ static const unsigned int byt_score_plt_clk5_pins[] = { 101 };
 static const unsigned int byt_score_smbus_pins[] = { 51, 52, 53 };
 
 static const struct intel_pingroup byt_score_groups[] = {
-       PIN_GROUP("uart1_grp", byt_score_uart1_pins, 1),
-       PIN_GROUP("uart2_grp", byt_score_uart2_pins, 1),
-       PIN_GROUP("pwm0_grp", byt_score_pwm0_pins, 1),
-       PIN_GROUP("pwm1_grp", byt_score_pwm1_pins, 1),
-       PIN_GROUP("ssp2_grp", byt_score_ssp2_pins, 1),
-       PIN_GROUP("sio_spi_grp", byt_score_sio_spi_pins, 1),
-       PIN_GROUP("i2c5_grp", byt_score_i2c5_pins, 1),
-       PIN_GROUP("i2c6_grp", byt_score_i2c6_pins, 1),
-       PIN_GROUP("i2c4_grp", byt_score_i2c4_pins, 1),
-       PIN_GROUP("i2c3_grp", byt_score_i2c3_pins, 1),
-       PIN_GROUP("i2c2_grp", byt_score_i2c2_pins, 1),
-       PIN_GROUP("i2c1_grp", byt_score_i2c1_pins, 1),
-       PIN_GROUP("i2c0_grp", byt_score_i2c0_pins, 1),
-       PIN_GROUP("ssp0_grp", byt_score_ssp0_pins, 1),
-       PIN_GROUP("ssp1_grp", byt_score_ssp1_pins, 1),
-       PIN_GROUP("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
-       PIN_GROUP("sdio_grp", byt_score_sdio_pins, 1),
-       PIN_GROUP("emmc_grp", byt_score_emmc_pins, 1),
-       PIN_GROUP("lpc_grp", byt_score_ilb_lpc_pins, 1),
-       PIN_GROUP("sata_grp", byt_score_sata_pins, 1),
-       PIN_GROUP("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
-       PIN_GROUP("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
-       PIN_GROUP("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
-       PIN_GROUP("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
-       PIN_GROUP("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
-       PIN_GROUP("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
-       PIN_GROUP("smbus_grp", byt_score_smbus_pins, 1),
+       PIN_GROUP_GPIO("uart1_grp", byt_score_uart1_pins, 1),
+       PIN_GROUP_GPIO("uart2_grp", byt_score_uart2_pins, 1),
+       PIN_GROUP_GPIO("uart3_grp", byt_score_uart3_pins, 1),
+       PIN_GROUP_GPIO("pwm0_grp", byt_score_pwm0_pins, 1),
+       PIN_GROUP_GPIO("pwm1_grp", byt_score_pwm1_pins, 1),
+       PIN_GROUP_GPIO("ssp2_grp", byt_score_ssp2_pins, 1),
+       PIN_GROUP_GPIO("sio_spi_grp", byt_score_sio_spi_pins, 1),
+       PIN_GROUP_GPIO("i2c5_grp", byt_score_i2c5_pins, 1),
+       PIN_GROUP_GPIO("i2c6_grp", byt_score_i2c6_pins, 1),
+       PIN_GROUP_GPIO("i2c4_grp", byt_score_i2c4_pins, 1),
+       PIN_GROUP_GPIO("i2c3_grp", byt_score_i2c3_pins, 1),
+       PIN_GROUP_GPIO("i2c2_grp", byt_score_i2c2_pins, 1),
+       PIN_GROUP_GPIO("i2c1_grp", byt_score_i2c1_pins, 1),
+       PIN_GROUP_GPIO("i2c0_grp", byt_score_i2c0_pins, 1),
+       PIN_GROUP_GPIO("ssp0_grp", byt_score_ssp0_pins, 1),
+       PIN_GROUP_GPIO("ssp1_grp", byt_score_ssp1_pins, 1),
+       PIN_GROUP_GPIO("sdcard_grp", byt_score_sdcard_pins, byt_score_sdcard_mux_values),
+       PIN_GROUP_GPIO("sdio_grp", byt_score_sdio_pins, 1),
+       PIN_GROUP_GPIO("emmc_grp", byt_score_emmc_pins, 1),
+       PIN_GROUP_GPIO("lpc_grp", byt_score_ilb_lpc_pins, 1),
+       PIN_GROUP_GPIO("sata_grp", byt_score_sata_pins, 1),
+       PIN_GROUP_GPIO("plt_clk0_grp", byt_score_plt_clk0_pins, 1),
+       PIN_GROUP_GPIO("plt_clk1_grp", byt_score_plt_clk1_pins, 1),
+       PIN_GROUP_GPIO("plt_clk2_grp", byt_score_plt_clk2_pins, 1),
+       PIN_GROUP_GPIO("plt_clk3_grp", byt_score_plt_clk3_pins, 1),
+       PIN_GROUP_GPIO("plt_clk4_grp", byt_score_plt_clk4_pins, 1),
+       PIN_GROUP_GPIO("plt_clk5_grp", byt_score_plt_clk5_pins, 1),
+       PIN_GROUP_GPIO("smbus_grp", byt_score_smbus_pins, 1),
 };
 
 static const char * const byt_score_uart_groups[] = {
-       "uart1_grp", "uart2_grp",
+       "uart1_grp", "uart2_grp", "uart3_grp",
 };
 static const char * const byt_score_pwm_groups[] = {
        "pwm0_grp", "pwm1_grp",
@@ -332,12 +334,14 @@ static const char * const byt_score_plt_clk_groups[] = {
 };
 static const char * const byt_score_smbus_groups[] = { "smbus_grp" };
 static const char * const byt_score_gpio_groups[] = {
-       "uart1_grp", "uart2_grp", "pwm0_grp", "pwm1_grp", "ssp0_grp",
-       "ssp1_grp", "ssp2_grp", "sio_spi_grp", "i2c0_grp", "i2c1_grp",
-       "i2c2_grp", "i2c3_grp", "i2c4_grp", "i2c5_grp", "i2c6_grp",
-       "sdcard_grp", "sdio_grp", "emmc_grp", "lpc_grp", "sata_grp",
-       "plt_clk0_grp", "plt_clk1_grp", "plt_clk2_grp", "plt_clk3_grp",
-       "plt_clk4_grp", "plt_clk5_grp", "smbus_grp",
+       "uart1_grp_gpio", "uart2_grp_gpio", "uart3_grp_gpio", "pwm0_grp_gpio",
+       "pwm1_grp_gpio", "ssp0_grp_gpio", "ssp1_grp_gpio", "ssp2_grp_gpio",
+       "sio_spi_grp_gpio", "i2c0_grp_gpio", "i2c1_grp_gpio", "i2c2_grp_gpio",
+       "i2c3_grp_gpio", "i2c4_grp_gpio", "i2c5_grp_gpio", "i2c6_grp_gpio",
+       "sdcard_grp_gpio", "sdio_grp_gpio", "emmc_grp_gpio", "lpc_grp_gpio",
+       "sata_grp_gpio", "plt_clk0_grp_gpio", "plt_clk1_grp_gpio",
+       "plt_clk2_grp_gpio", "plt_clk3_grp_gpio", "plt_clk4_grp_gpio",
+       "plt_clk5_grp_gpio", "smbus_grp_gpio",
 };
 
 static const struct intel_function byt_score_functions[] = {
@@ -456,8 +460,8 @@ static const struct intel_pingroup byt_sus_groups[] = {
        PIN_GROUP("usb_oc_grp_gpio", byt_sus_usb_over_current_pins, byt_sus_usb_over_current_gpio_mode_values),
        PIN_GROUP("usb_ulpi_grp_gpio", byt_sus_usb_ulpi_pins, byt_sus_usb_ulpi_gpio_mode_values),
        PIN_GROUP("pcu_spi_grp_gpio", byt_sus_pcu_spi_pins, byt_sus_pcu_spi_gpio_mode_values),
-       PIN_GROUP("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
-       PIN_GROUP("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
+       PIN_GROUP_GPIO("pmu_clk1_grp", byt_sus_pmu_clk1_pins, 1),
+       PIN_GROUP_GPIO("pmu_clk2_grp", byt_sus_pmu_clk2_pins, 1),
 };
 
 static const char * const byt_sus_usb_groups[] = {
@@ -469,7 +473,7 @@ static const char * const byt_sus_pmu_clk_groups[] = {
 };
 static const char * const byt_sus_gpio_groups[] = {
        "usb_oc_grp_gpio", "usb_ulpi_grp_gpio", "pcu_spi_grp_gpio",
-       "pmu_clk1_grp", "pmu_clk2_grp",
+       "pmu_clk1_grp_gpio", "pmu_clk2_grp_gpio",
 };
 
 static const struct intel_function byt_sus_functions[] = {
index fde65e18cd145ecb1eb809f87ecec289067e44e8..6981e2fab93f34998b737ac39446c4eed5646923 100644 (file)
@@ -179,6 +179,10 @@ struct intel_community {
                .modes = __builtin_choose_expr(__builtin_constant_p((m)), NULL, (m)),   \
        }
 
+#define PIN_GROUP_GPIO(n, p, m)                                                \
+        PIN_GROUP(n, p, m),                                            \
+        PIN_GROUP(n "_gpio", p, 0)
+
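
PIN_GROUP_GPIO() expands into two table entries, the normal group plus a "<name>_gpio" group with mode 0, which is why the gpio group lists in the Baytrail hunks above now carry the _gpio suffix. A small stand-alone sketch of that expansion is shown here; the struct and the PIN_GROUP stand-in are simplified and not the driver's definitions.

#include <stddef.h>
#include <stdio.h>

struct group {
	const char *name;
	unsigned int mode;
};

/* Simplified stand-ins for the driver macros. */
#define PIN_GROUP(n, m)		{ .name = (n), .mode = (m) }
#define PIN_GROUP_GPIO(n, m)	PIN_GROUP(n, m), PIN_GROUP(n "_gpio", 0)

static const struct group groups[] = {
	PIN_GROUP_GPIO("uart3_grp", 1),	/* yields "uart3_grp" and "uart3_grp_gpio" */
};

int main(void)
{
	for (size_t i = 0; i < sizeof(groups) / sizeof(groups[0]); i++)
		printf("%s mode=%u\n", groups[i].name, groups[i].mode);
	return 0;
}
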
 #define FUNCTION(n, g)                                                 \
        {                                                               \
                .func = PINCTRL_PINFUNCTION((n), (g), ARRAY_SIZE(g)),   \
index b6bc31abd2b068695dcfd5025c0f31ea0b3c679e..b19bc391705ee079939f34ab03d554160d97c234 100644 (file)
@@ -165,20 +165,21 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
                err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SR, &ret);
                break;
        case PIN_CONFIG_INPUT_ENABLE:
-       case PIN_CONFIG_OUTPUT_ENABLE:
+               err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_IES, &ret);
+               if (!ret)
+                       err = -EINVAL;
+               break;
+       case PIN_CONFIG_OUTPUT:
                err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
                if (err)
                        break;
-               /*     CONFIG     Current direction return value
-                * -------------  ----------------- ----------------------
-                * OUTPUT_ENABLE       output       1 (= HW value)
-                *                     input        0 (= HW value)
-                * INPUT_ENABLE        output       0 (= reverse HW value)
-                *                     input        1 (= reverse HW value)
-                */
-               if (param == PIN_CONFIG_INPUT_ENABLE)
-                       ret = !ret;
 
+               if (!ret) {
+                       err = -EINVAL;
+                       break;
+               }
+
+               err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DO, &ret);
                break;
        case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
                err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &ret);
@@ -193,6 +194,8 @@ static int mtk_pinconf_get(struct pinctrl_dev *pctldev,
                }
 
                err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_SMT, &ret);
+               if (!ret)
+                       err = -EINVAL;
                break;
        case PIN_CONFIG_DRIVE_STRENGTH:
                if (!hw->soc->drive_get)
@@ -281,26 +284,9 @@ static int mtk_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
                        break;
                err = hw->soc->bias_set_combo(hw, desc, 0, arg);
                break;
-       case PIN_CONFIG_OUTPUT_ENABLE:
-               err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_SMT,
-                                      MTK_DISABLE);
-               /* Keep set direction to consider the case that a GPIO pin
-                *  does not have SMT control
-                */
-               if (err != -ENOTSUPP)
-                       break;
-
-               err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
-                                      MTK_OUTPUT);
-               break;
        case PIN_CONFIG_INPUT_ENABLE:
                /* regard all non-zero value as enable */
                err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_IES, !!arg);
-               if (err)
-                       break;
-
-               err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_DIR,
-                                      MTK_INPUT);
                break;
        case PIN_CONFIG_SLEW_RATE:
                /* regard all non-zero value as enable */
index 79f5d753d7e1a53df0da9bb0446bf8fdfbd3042c..50a87d9618a8e8e078424db33c7e13a7c7ed1dc6 100644 (file)
@@ -250,7 +250,7 @@ static const unsigned int pdm_dclk_x_pins[]         = { GPIOX_10 };
 static const unsigned int pdm_din2_a_pins[]            = { GPIOA_6 };
 static const unsigned int pdm_din1_a_pins[]            = { GPIOA_7 };
 static const unsigned int pdm_din0_a_pins[]            = { GPIOA_8 };
-static const unsigned int pdm_dclk_pins[]              = { GPIOA_9 };
+static const unsigned int pdm_dclk_a_pins[]            = { GPIOA_9 };
 
 /* gen_clk */
 static const unsigned int gen_clk_x_pins[]             = { GPIOX_7 };
@@ -591,7 +591,7 @@ static struct meson_pmx_group meson_a1_periphs_groups[] = {
        GROUP(pdm_din2_a,               3),
        GROUP(pdm_din1_a,               3),
        GROUP(pdm_din0_a,               3),
-       GROUP(pdm_dclk,                 3),
+       GROUP(pdm_dclk_a,               3),
        GROUP(pwm_c_a,                  3),
        GROUP(pwm_b_a,                  3),
 
@@ -755,7 +755,7 @@ static const char * const spi_a_groups[] = {
 
 static const char * const pdm_groups[] = {
        "pdm_din0_x", "pdm_din1_x", "pdm_din2_x", "pdm_dclk_x", "pdm_din2_a",
-       "pdm_din1_a", "pdm_din0_a", "pdm_dclk",
+       "pdm_din1_a", "pdm_din0_a", "pdm_dclk_a",
 };
 
 static const char * const gen_clk_groups[] = {
index eb5a8c65426061fec78bced9cf66243debbb561f..20425afc6b331b336c314d5a3f4c6dc9b813382d 100644 (file)
@@ -2045,7 +2045,9 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
 
        for (unsigned int i = 0; i < RZG2L_TINT_MAX_INTERRUPT; i++) {
                struct irq_data *data;
+               unsigned long flags;
                unsigned int virq;
+               int ret;
 
                if (!pctrl->hwirq[i])
                        continue;
@@ -2063,8 +2065,18 @@ static void rzg2l_gpio_irq_restore(struct rzg2l_pinctrl *pctrl)
                        continue;
                }
 
-               if (!irqd_irq_disabled(data))
+               /*
+                * This has to be atomically executed to protect against a concurrent
+                * interrupt.
+                */
+               raw_spin_lock_irqsave(&pctrl->lock.rlock, flags);
+               ret = rzg2l_gpio_irq_set_type(data, irqd_get_trigger_type(data));
+               if (!ret && !irqd_irq_disabled(data))
                        rzg2l_gpio_irq_enable(data);
+               raw_spin_unlock_irqrestore(&pctrl->lock.rlock, flags);
+
+               if (ret)
+                       dev_crit(pctrl->dev, "Failed to set IRQ type for virq=%u\n", virq);
        }
 }
 
index 30951f7131cd98bfdaffb70b2aa30ee3ceb7dbdd..1accdaaf282c5331495e51a6bd71a9203a7c0200 100644 (file)
@@ -721,6 +721,7 @@ static struct miscdevice isst_if_char_driver = {
 static const struct x86_cpu_id hpm_cpu_ids[] = {
        X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D,     NULL),
        X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X,     NULL),
+       X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT,      NULL),
        X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X,    NULL),
        {}
 };
index 1305cba61edd4b957313d7d86fb05158975b3086..aca123783efccdcb73391214d20c3722975222ca 100644 (file)
@@ -588,7 +588,7 @@ static const struct regulator_ops mt6360_chg_otg_ops = {
 };
 
 static const struct regulator_desc mt6360_otg_rdesc = {
-       .of_match = "usb-otg-vbus",
+       .of_match = "usb-otg-vbus-regulator",
        .name = "usb-otg-vbus",
        .ops = &mt6360_chg_otg_ops,
        .owner = THIS_MODULE,
index c345a77f9f78c0a4a03f5a13a7151901b9945a03..e4dbacd50a437df3562b04d485ec38e11a89b216 100644 (file)
@@ -192,6 +192,7 @@ static const int rt9455_voreg_values[] = {
        4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000, 4450000
 };
 
+#if IS_ENABLED(CONFIG_USB_PHY)
 /*
  * When the charger is in boost mode, REG02[7:2] represent boost output
  * voltage.
@@ -207,6 +208,7 @@ static const int rt9455_boost_voltage_values[] = {
        5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
        5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000, 5600000,
 };
+#endif
 
 /* REG07[3:0] (VMREG) in uV */
 static const int rt9455_vmreg_values[] = {
index fe7ae0f3f46af985a87f4a0b5c33c3fd09f6b1a8..5ab1a0befe12f791389911651d8548fb3611b6f8 100644 (file)
@@ -352,6 +352,9 @@ void *regulator_irq_helper(struct device *dev,
 
        h->irq = irq;
        h->desc = *d;
+       h->desc.name = devm_kstrdup(dev, d->name, GFP_KERNEL);
+       if (!h->desc.name)
+               return ERR_PTR(-ENOMEM);
 
        ret = init_rdev_state(dev, h, rdev, common_errs, per_rdev_errs,
                              rdev_amount);
index ad6587a378d09cc52515475f96512ed5627880be..24cc9fc94e900a43a8bfa8c30c364cedb2b0b948 100644 (file)
@@ -319,15 +319,15 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
        }
 }
 
-#define MT6360_REGULATOR_DESC(_name, _sname, ereg, emask, vreg,        vmask,  \
-                             mreg, mmask, streg, stmask, vranges,      \
-                             vcnts, offon_delay, irq_tbls)             \
+#define MT6360_REGULATOR_DESC(match, _name, _sname, ereg, emask, vreg, \
+                             vmask, mreg, mmask, streg, stmask,        \
+                             vranges, vcnts, offon_delay, irq_tbls)    \
 {                                                                      \
        .desc = {                                                       \
                .name = #_name,                                         \
                .supply_name = #_sname,                                 \
                .id =  MT6360_REGULATOR_##_name,                        \
-               .of_match = of_match_ptr(#_name),                       \
+               .of_match = of_match_ptr(match),                        \
                .regulators_node = of_match_ptr("regulator"),           \
                .of_map_mode = mt6360_regulator_of_map_mode,            \
                .owner = THIS_MODULE,                                   \
@@ -351,21 +351,29 @@ static unsigned int mt6360_regulator_of_map_mode(unsigned int hw_mode)
 }
 
 static const struct mt6360_regulator_desc mt6360_regulator_descs[] =  {
-       MT6360_REGULATOR_DESC(BUCK1, BUCK1_VIN, 0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
+       MT6360_REGULATOR_DESC("buck1", BUCK1, BUCK1_VIN,
+                             0x117, 0x40, 0x110, 0xff, 0x117, 0x30, 0x117, 0x04,
                              buck_vout_ranges, 256, 0, buck1_irq_tbls),
-       MT6360_REGULATOR_DESC(BUCK2, BUCK2_VIN, 0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
+       MT6360_REGULATOR_DESC("buck2", BUCK2, BUCK2_VIN,
+                             0x127, 0x40, 0x120, 0xff, 0x127, 0x30, 0x127, 0x04,
                              buck_vout_ranges, 256, 0, buck2_irq_tbls),
-       MT6360_REGULATOR_DESC(LDO6, LDO_VIN3, 0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
+       MT6360_REGULATOR_DESC("ldo6", LDO6, LDO_VIN3,
+                             0x137, 0x40, 0x13B, 0xff, 0x137, 0x30, 0x137, 0x04,
                              ldo_vout_ranges1, 256, 0, ldo6_irq_tbls),
-       MT6360_REGULATOR_DESC(LDO7, LDO_VIN3, 0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
+       MT6360_REGULATOR_DESC("ldo7", LDO7, LDO_VIN3,
+                             0x131, 0x40, 0x135, 0xff, 0x131, 0x30, 0x131, 0x04,
                              ldo_vout_ranges1, 256, 0, ldo7_irq_tbls),
-       MT6360_REGULATOR_DESC(LDO1, LDO_VIN1, 0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
+       MT6360_REGULATOR_DESC("ldo1", LDO1, LDO_VIN1,
+                             0x217, 0x40, 0x21B, 0xff, 0x217, 0x30, 0x217, 0x04,
                              ldo_vout_ranges2, 256, 0, ldo1_irq_tbls),
-       MT6360_REGULATOR_DESC(LDO2, LDO_VIN1, 0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
+       MT6360_REGULATOR_DESC("ldo2", LDO2, LDO_VIN1,
+                             0x211, 0x40, 0x215, 0xff, 0x211, 0x30, 0x211, 0x04,
                              ldo_vout_ranges2, 256, 0, ldo2_irq_tbls),
-       MT6360_REGULATOR_DESC(LDO3, LDO_VIN1, 0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
+       MT6360_REGULATOR_DESC("ldo3", LDO3, LDO_VIN1,
+                             0x205, 0x40, 0x209, 0xff, 0x205, 0x30, 0x205, 0x04,
                              ldo_vout_ranges2, 256, 100, ldo3_irq_tbls),
-       MT6360_REGULATOR_DESC(LDO5, LDO_VIN2, 0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
+       MT6360_REGULATOR_DESC("ldo5", LDO5, LDO_VIN2,
+                             0x20B, 0x40, 0x20F, 0x7f, 0x20B, 0x30, 0x20B, 0x04,
                              ldo_vout_ranges3, 128, 100, ldo5_irq_tbls),
 };
 
index 656fe330d38f0adbc953b8d42861088a6eaa757b..063e12c08e75f70c002e6f3e859a8bba5e8ef344 100644 (file)
@@ -140,6 +140,7 @@ static const struct of_device_id qcom_refgen_match_table[] = {
        { .compatible = "qcom,sm8250-refgen-regulator", .data = &sm8250_refgen_desc },
        { }
 };
+MODULE_DEVICE_TABLE(of, qcom_refgen_match_table);
 
 static struct platform_driver qcom_refgen_driver = {
        .probe = qcom_refgen_probe,
index 086da36abc0b4917a26fc78745dfdb8a48b5cfd5..4955616517ce9caa22f652e9370e7d517098b378 100644 (file)
@@ -84,6 +84,7 @@ static const struct of_device_id regulator_ipq4019_of_match[] = {
        { .compatible = "qcom,vqmmc-ipq4019-regulator", },
        {},
 };
+MODULE_DEVICE_TABLE(of, regulator_ipq4019_of_match);
 
 static struct platform_driver ipq4019_regulator_driver = {
        .probe = ipq4019_regulator_probe,
index 37173cb0f5f5a222b936029b8ff8e7997c401d4d..c57694be9bd32ef8cb9dc8ad794379ef2da44640 100644 (file)
@@ -162,7 +162,8 @@ struct raw3270_request *raw3270_request_alloc(size_t size)
        /*
         * Setup ccw.
         */
-       rq->ccw.cda = virt_to_dma32(rq->buffer);
+       if (rq->buffer)
+               rq->ccw.cda = virt_to_dma32(rq->buffer);
        rq->ccw.flags = CCW_FLAG_SLI;
 
        return rq;
@@ -188,7 +189,8 @@ int raw3270_request_reset(struct raw3270_request *rq)
                return -EBUSY;
        rq->ccw.cmd_code = 0;
        rq->ccw.count = 0;
-       rq->ccw.cda = virt_to_dma32(rq->buffer);
+       if (rq->buffer)
+               rq->ccw.cda = virt_to_dma32(rq->buffer);
        rq->ccw.flags = CCW_FLAG_SLI;
        rq->rescnt = 0;
        rq->rc = 0;
index 8613fa937237bde02368523577a485b62b07c5dc..a2e771ebae8ebd55e1cb841f76b44a69ebb2bea7 100644 (file)
@@ -95,7 +95,7 @@ static ssize_t crw_inject_write(struct file *file, const char __user *buf,
                return -EINVAL;
        }
 
-       buffer = vmemdup_user(buf, lbuf);
+       buffer = memdup_user_nul(buf, lbuf);
        if (IS_ERR(buffer))
                return -ENOMEM;
 
index 0a3a678ffc7eedb237f3bc72e21ffe4c076c1444..6087547328ce91271eeab7a13c02059299c0c39a 100644 (file)
@@ -658,7 +658,7 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
                               (int)prepcblk->ccp_rtcode,
                               (int)prepcblk->ccp_rscode);
                if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
-                       rc = -EAGAIN;
+                       rc = -EBUSY;
                else
                        rc = -EIO;
                goto out;
@@ -1263,7 +1263,7 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
                               (int)prepcblk->ccp_rtcode,
                               (int)prepcblk->ccp_rscode);
                if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
-                       rc = -EAGAIN;
+                       rc = -EBUSY;
                else
                        rc = -EIO;
                goto out;
@@ -1426,7 +1426,7 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
                               (int)prepcblk->ccp_rtcode,
                               (int)prepcblk->ccp_rscode);
                if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
-                       rc = -EAGAIN;
+                       rc = -EBUSY;
                else
                        rc = -EIO;
                goto out;
index eb7f5489ccf95962034a2a135dbc30fa83599973..9bcf8fc69ebe428bff07458406f70be0407d0b39 100644 (file)
@@ -556,13 +556,29 @@ static int check_reply_pl(const u8 *pl, const char *func)
        pl += 2;
        ret = *((u32 *)pl);
        if (ret != 0) {
-               ZCRYPT_DBF_ERR("%s return value 0x%04x != 0\n", func, ret);
+               ZCRYPT_DBF_ERR("%s return value 0x%08x != 0\n", func, ret);
                return -EIO;
        }
 
        return 0;
 }
 
+/* Check ep11 reply cprb, return 0 or suggested errno value. */
+static int check_reply_cprb(const struct ep11_cprb *rep, const char *func)
+{
+       /* check ep11 reply return code field */
+       if (rep->ret_code) {
+               ZCRYPT_DBF_ERR("%s ep11 reply ret_code=0x%08x\n", __func__,
+                              rep->ret_code);
+               if (rep->ret_code == 0x000c0003)
+                       return -EBUSY;
+               else
+                       return -EIO;
+       }
+
+       return 0;
+}
+
 /*
  * Helper function which does an ep11 query with given query type.
  */
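
check_reply_cprb() centralises checking of the EP11 reply CPRB return code before the payload is parsed, mapping the busy code 0x000c0003 to -EBUSY and any other non-zero code to -EIO. A stand-alone sketch of that mapping follows; the error constants are illustrative placeholders, not the kernel's errno values.

#include <stdint.h>
#include <stdio.h>

#define TOY_EBUSY	16
#define TOY_EIO		5

/* Map a reply return code to 0 or a suggested negative errno value. */
static int check_ret_code(uint32_t ret_code)
{
	if (!ret_code)
		return 0;
	return ret_code == 0x000c0003 ? -TOY_EBUSY : -TOY_EIO;
}

int main(void)
{
	printf("%d %d %d\n", check_ret_code(0), check_ret_code(0x000c0003),
	       check_ret_code(0xdeadbeef));
	return 0;	/* prints "0 -16 -5" */
}
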
@@ -627,6 +643,12 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
                goto out;
        }
 
+       /* check ep11 reply cprb */
+       rc = check_reply_cprb(rep, __func__);
+       if (rc)
+               goto out;
+
+       /* check payload */
        rc = check_reply_pl((u8 *)rep_pl, __func__);
        if (rc)
                goto out;
@@ -877,6 +899,12 @@ static int _ep11_genaeskey(u16 card, u16 domain,
                goto out;
        }
 
+       /* check ep11 reply cprb */
+       rc = check_reply_cprb(rep, __func__);
+       if (rc)
+               goto out;
+
+       /* check payload */
        rc = check_reply_pl((u8 *)rep_pl, __func__);
        if (rc)
                goto out;
@@ -1028,6 +1056,12 @@ static int ep11_cryptsingle(u16 card, u16 domain,
                goto out;
        }
 
+       /* check ep11 reply cprb */
+       rc = check_reply_cprb(rep, __func__);
+       if (rc)
+               goto out;
+
+       /* check payload */
        rc = check_reply_pl((u8 *)rep_pl, __func__);
        if (rc)
                goto out;
@@ -1185,6 +1219,12 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
                goto out;
        }
 
+       /* check ep11 reply cprb */
+       rc = check_reply_cprb(rep, __func__);
+       if (rc)
+               goto out;
+
+       /* check payload */
        rc = check_reply_pl((u8 *)rep_pl, __func__);
        if (rc)
                goto out;
@@ -1339,6 +1379,12 @@ static int _ep11_wrapkey(u16 card, u16 domain,
                goto out;
        }
 
+       /* check ep11 reply cprb */
+       rc = check_reply_cprb(rep, __func__);
+       if (rc)
+               goto out;
+
+       /* check payload */
        rc = check_reply_pl((u8 *)rep_pl, __func__);
        if (rc)
                goto out;
index f0b8b709649f29691c3d0976b8a1156da1f6bd81..a3adaec5504e45384eeedf8f82ee160317fcbbf5 100644 (file)
@@ -364,30 +364,33 @@ out:
        return rc;
 }
 
+static void qeth_free_cq(struct qeth_card *card)
+{
+       if (card->qdio.c_q) {
+               qeth_free_qdio_queue(card->qdio.c_q);
+               card->qdio.c_q = NULL;
+       }
+}
+
 static int qeth_alloc_cq(struct qeth_card *card)
 {
        if (card->options.cq == QETH_CQ_ENABLED) {
                QETH_CARD_TEXT(card, 2, "cqon");
-               card->qdio.c_q = qeth_alloc_qdio_queue();
                if (!card->qdio.c_q) {
-                       dev_err(&card->gdev->dev, "Failed to create completion queue\n");
-                       return -ENOMEM;
+                       card->qdio.c_q = qeth_alloc_qdio_queue();
+                       if (!card->qdio.c_q) {
+                               dev_err(&card->gdev->dev,
+                                       "Failed to create completion queue\n");
+                               return -ENOMEM;
+                       }
                }
        } else {
                QETH_CARD_TEXT(card, 2, "nocq");
-               card->qdio.c_q = NULL;
+               qeth_free_cq(card);
        }
        return 0;
 }
 
-static void qeth_free_cq(struct qeth_card *card)
-{
-       if (card->qdio.c_q) {
-               qeth_free_qdio_queue(card->qdio.c_q);
-               card->qdio.c_q = NULL;
-       }
-}
-
 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
                                                        int delayed)
 {
@@ -2628,6 +2631,10 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
 
        QETH_CARD_TEXT(card, 2, "allcqdbf");
 
+       /* completion */
+       if (qeth_alloc_cq(card))
+               goto out_err;
+
        if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
                QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
                return 0;
@@ -2663,10 +2670,6 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
                queue->priority = QETH_QIB_PQUE_PRIO_DEFAULT;
        }
 
-       /* completion */
-       if (qeth_alloc_cq(card))
-               goto out_freeoutq;
-
        return 0;
 
 out_freeoutq:
@@ -2677,6 +2680,8 @@ out_freeoutq:
        qeth_free_buffer_pool(card);
 out_buffer_pool:
        atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
+       qeth_free_cq(card);
+out_err:
        return -ENOMEM;
 }
 
@@ -2684,11 +2689,12 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
 {
        int i, j;
 
+       qeth_free_cq(card);
+
        if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
                QETH_QDIO_UNINITIALIZED)
                return;
 
-       qeth_free_cq(card);
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (card->qdio.in_q->bufs[j].rx_skb) {
                        consume_skb(card->qdio.in_q->bufs[j].rx_skb);
@@ -3742,24 +3748,11 @@ static void qeth_qdio_poll(struct ccw_device *cdev, unsigned long card_ptr)
 
 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
 {
-       int rc;
-
-       if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
-               rc = -1;
-               goto out;
-       } else {
-               if (card->options.cq == cq) {
-                       rc = 0;
-                       goto out;
-               }
-
-               qeth_free_qdio_queues(card);
-               card->options.cq = cq;
-               rc = 0;
-       }
-out:
-       return rc;
+       if (card->options.cq == QETH_CQ_NOTAVAILABLE)
+               return -1;
 
+       card->options.cq = cq;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(qeth_configure_cq);
 
index 58fdf679341dc64ee1768f1d09dc7ce4d549b4bd..65cdc8b77e358546fd1768fe4ec1bb4588c4b692 100644 (file)
@@ -3120,6 +3120,7 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
 {
        struct scsi_device *sdp = sdkp->device;
        const struct scsi_io_group_descriptor *desc, *start, *end;
+       u16 permanent_stream_count_old;
        struct scsi_sense_hdr sshdr;
        struct scsi_mode_data data;
        int res;
@@ -3140,12 +3141,13 @@ static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
        for (desc = start; desc < end; desc++)
                if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
                        break;
+       permanent_stream_count_old = sdkp->permanent_stream_count;
        sdkp->permanent_stream_count = desc - start;
        if (sdkp->rscs && sdkp->permanent_stream_count < 2)
                sd_printk(KERN_INFO, sdkp,
                          "Unexpected: RSCS has been set and the permanent stream count is %u\n",
                          sdkp->permanent_stream_count);
-       else if (sdkp->permanent_stream_count)
+       else if (sdkp->permanent_stream_count != permanent_stream_count_old)
                sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
                          sdkp->permanent_stream_count);
 }
index 7cc219d78551adbcd99e10e4d59f50653276cac1..e358ac5b4509777e0827630e2f6e58c385f9a54c 100644 (file)
@@ -623,7 +623,7 @@ static int spi_engine_probe(struct platform_device *pdev)
 
        version = readl(spi_engine->base + ADI_AXI_REG_VERSION);
        if (ADI_AXI_PCORE_VER_MAJOR(version) != 1) {
-               dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%c\n",
+               dev_err(&pdev->dev, "Unsupported peripheral version %u.%u.%u\n",
                        ADI_AXI_PCORE_VER_MAJOR(version),
                        ADI_AXI_PCORE_VER_MINOR(version),
                        ADI_AXI_PCORE_VER_PATCH(version));
index 35ef5e8e2ffd253ce16ea0594b2cabcf579cff77..77e9738e42f60ec844c74f09dddf5d90e898d87d 100644 (file)
@@ -151,8 +151,6 @@ static const struct debugfs_reg32 hisi_spi_regs[] = {
        HISI_SPI_DBGFS_REG("ENR", HISI_SPI_ENR),
        HISI_SPI_DBGFS_REG("FIFOC", HISI_SPI_FIFOC),
        HISI_SPI_DBGFS_REG("IMR", HISI_SPI_IMR),
-       HISI_SPI_DBGFS_REG("DIN", HISI_SPI_DIN),
-       HISI_SPI_DBGFS_REG("DOUT", HISI_SPI_DOUT),
        HISI_SPI_DBGFS_REG("SR", HISI_SPI_SR),
        HISI_SPI_DBGFS_REG("RISR", HISI_SPI_RISR),
        HISI_SPI_DBGFS_REG("ISR", HISI_SPI_ISR),
index ff75838c1b5dfa44fccca525c3a8a334051167fb..a2c467d9e92f59c46eea6ec78b3b12a9ae5a6caf 100644 (file)
@@ -4523,6 +4523,7 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
                wait_for_completion(&done);
                status = message->status;
        }
+       message->complete = NULL;
        message->context = NULL;
 
        return status;
index d78d54ae2605e8ab3050dd7a1e68fb13688a78c5..5693cc8b231aacdcf0a90f363a89e15f7bcd93f8 100644 (file)
@@ -139,11 +139,13 @@ struct tz_episode {
  * we keep track of the current position in the history array.
  *
  * @tz_episodes: a list of thermal mitigation episodes
+ * @tz: thermal zone this object belongs to
  * @trips_crossed: an array of trip points crossed by id
  * @nr_trips: the number of trip points currently being crossed
  */
 struct tz_debugfs {
        struct list_head tz_episodes;
+       struct thermal_zone_device *tz;
        int *trips_crossed;
        int nr_trips;
 };
@@ -503,15 +505,23 @@ void thermal_debug_cdev_add(struct thermal_cooling_device *cdev)
  */
 void thermal_debug_cdev_remove(struct thermal_cooling_device *cdev)
 {
-       struct thermal_debugfs *thermal_dbg = cdev->debugfs;
+       struct thermal_debugfs *thermal_dbg;
 
-       if (!thermal_dbg)
+       mutex_lock(&cdev->lock);
+
+       thermal_dbg = cdev->debugfs;
+       if (!thermal_dbg) {
+               mutex_unlock(&cdev->lock);
                return;
+       }
+
+       cdev->debugfs = NULL;
+
+       mutex_unlock(&cdev->lock);
 
        mutex_lock(&thermal_dbg->lock);
 
        thermal_debugfs_cdev_clear(&thermal_dbg->cdev_dbg);
-       cdev->debugfs = NULL;
 
        mutex_unlock(&thermal_dbg->lock);
 
@@ -716,8 +726,7 @@ out:
 
 static void *tze_seq_start(struct seq_file *s, loff_t *pos)
 {
-       struct thermal_zone_device *tz = s->private;
-       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct thermal_debugfs *thermal_dbg = s->private;
        struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
 
        mutex_lock(&thermal_dbg->lock);
@@ -727,8 +736,7 @@ static void *tze_seq_start(struct seq_file *s, loff_t *pos)
 
 static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
 {
-       struct thermal_zone_device *tz = s->private;
-       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct thermal_debugfs *thermal_dbg = s->private;
        struct tz_debugfs *tz_dbg = &thermal_dbg->tz_dbg;
 
        return seq_list_next(v, &tz_dbg->tz_episodes, pos);
@@ -736,15 +744,15 @@ static void *tze_seq_next(struct seq_file *s, void *v, loff_t *pos)
 
 static void tze_seq_stop(struct seq_file *s, void *v)
 {
-       struct thermal_zone_device *tz = s->private;
-       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct thermal_debugfs *thermal_dbg = s->private;
 
        mutex_unlock(&thermal_dbg->lock);
 }
 
 static int tze_seq_show(struct seq_file *s, void *v)
 {
-       struct thermal_zone_device *tz = s->private;
+       struct thermal_debugfs *thermal_dbg = s->private;
+       struct thermal_zone_device *tz = thermal_dbg->tz_dbg.tz;
        struct thermal_trip *trip;
        struct tz_episode *tze;
        const char *type;
@@ -810,6 +818,8 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
 
        tz_dbg = &thermal_dbg->tz_dbg;
 
+       tz_dbg->tz = tz;
+
        tz_dbg->trips_crossed = kzalloc(sizeof(int) * tz->num_trips, GFP_KERNEL);
        if (!tz_dbg->trips_crossed) {
                thermal_debugfs_remove_id(thermal_dbg);
@@ -818,23 +828,44 @@ void thermal_debug_tz_add(struct thermal_zone_device *tz)
 
        INIT_LIST_HEAD(&tz_dbg->tz_episodes);
 
-       debugfs_create_file("mitigations", 0400, thermal_dbg->d_top, tz, &tze_fops);
+       debugfs_create_file("mitigations", 0400, thermal_dbg->d_top,
+                           thermal_dbg, &tze_fops);
 
        tz->debugfs = thermal_dbg;
 }
 
 void thermal_debug_tz_remove(struct thermal_zone_device *tz)
 {
-       struct thermal_debugfs *thermal_dbg = tz->debugfs;
+       struct thermal_debugfs *thermal_dbg;
+       struct tz_episode *tze, *tmp;
+       struct tz_debugfs *tz_dbg;
+       int *trips_crossed;
 
-       if (!thermal_dbg)
+       mutex_lock(&tz->lock);
+
+       thermal_dbg = tz->debugfs;
+       if (!thermal_dbg) {
+               mutex_unlock(&tz->lock);
                return;
+       }
+
+       tz->debugfs = NULL;
+
+       mutex_unlock(&tz->lock);
+
+       tz_dbg = &thermal_dbg->tz_dbg;
 
        mutex_lock(&thermal_dbg->lock);
 
-       tz->debugfs = NULL;
+       trips_crossed = tz_dbg->trips_crossed;
+
+       list_for_each_entry_safe(tze, tmp, &tz_dbg->tz_episodes, node) {
+               list_del(&tze->node);
+               kfree(tze);
+       }
 
        mutex_unlock(&thermal_dbg->lock);
 
        thermal_debugfs_remove_id(thermal_dbg);
+       kfree(trips_crossed);
 }
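
Both removal paths above follow the same shape: clear the back-pointer under the owner's lock so concurrent users can no longer reach the debugfs state, then tear that state down outside the lock. The pthread-based sketch below illustrates the pattern under those assumptions; the types, names and the simple free() stand in for the kernel objects and teardown work.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct debug_state { int dummy; };

struct owner {
	pthread_mutex_t lock;
	struct debug_state *debugfs;	/* may be cleared concurrently */
};

static void owner_remove_debugfs(struct owner *o)
{
	struct debug_state *dbg;

	/* Detach under the lock so other paths stop seeing it... */
	pthread_mutex_lock(&o->lock);
	dbg = o->debugfs;
	o->debugfs = NULL;
	pthread_mutex_unlock(&o->lock);

	if (!dbg)
		return;		/* someone else already removed it */

	/* ...then release it without holding the owner's lock. */
	free(dbg);
}

int main(void)
{
	struct owner o = { .debugfs = malloc(sizeof(struct debug_state)) };

	pthread_mutex_init(&o.lock, NULL);
	owner_remove_debugfs(&o);
	owner_remove_debugfs(&o);	/* second call is a harmless no-op */
	printf("debugfs=%p\n", (void *)o.debugfs);
	pthread_mutex_destroy(&o.lock);
	return 0;
}
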
index c60794264da2898b4d8d64eb2cf6f94ac90afb1b..45cb8149d374c2878e8c607282ca4e5e7875f063 100644 (file)
@@ -57,13 +57,14 @@ static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_n
        bp->v.seq               = cpu_to_le64(f->cookie);
        bp->v.sectors_written   = 0;
        bp->v.flags             = 0;
+       bp->v.sectors_written   = cpu_to_le16(f->sectors_written);
        bp->v.min_key           = f->min_key;
        SET_BTREE_PTR_RANGE_UPDATED(&bp->v, f->range_updated);
        memcpy(bp->v.start, f->ptrs, sizeof(struct bch_extent_ptr) * f->nr_ptrs);
 }
 
 static bool found_btree_node_is_readable(struct btree_trans *trans,
-                                        const struct found_btree_node *f)
+                                        struct found_btree_node *f)
 {
        struct { __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX); } k;
 
@@ -71,8 +72,10 @@ static bool found_btree_node_is_readable(struct btree_trans *trans,
 
        struct btree *b = bch2_btree_node_get_noiter(trans, &k.k, f->btree_id, f->level, false);
        bool ret = !IS_ERR_OR_NULL(b);
-       if (ret)
+       if (ret) {
+               f->sectors_written = b->written;
                six_unlock_read(&b->c.lock);
+       }
 
        /*
         * We might update this node's range; if that happens, we need the node
index abb7b27d556a9ff0f09e797494437a513122d8d3..5cfaeb5ac831b6396399d71858d2934e7d63c544 100644 (file)
@@ -9,6 +9,7 @@ struct found_btree_node {
        bool                    overwritten:1;
        u8                      btree_id;
        u8                      level;
+       unsigned                sectors_written;
        u32                     seq;
        u64                     cookie;
 
index 941401a210f56993359548e51b5095d0db45e691..82f179258867b7b1b6e5f21905f0bc3d3eaee41c 100644 (file)
@@ -525,7 +525,6 @@ int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
                        "different types of data in same bucket: %s, %s",
                        bch2_data_type_str(g->data_type),
                        bch2_data_type_str(data_type))) {
-               BUG();
                ret = -EIO;
                goto err;
        }
@@ -629,7 +628,6 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
                        bch2_data_type_str(ptr_data_type),
                        (printbuf_reset(&buf),
                         bch2_bkey_val_to_text(&buf, c, k), buf.buf));
-               BUG();
                ret = -EIO;
                goto err;
        }
index ca4a066e9a5428aa68f88d77f59e9c365a580d6c..0f95d7fb5ec0bddf755299a70908a3520c307f69 100644 (file)
@@ -606,7 +606,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
                       struct bkey_s new,
                       unsigned flags)
 {
-       s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);
+       s64 nr = (s64) bkey_is_inode(new.k) - (s64) bkey_is_inode(old.k);
 
        if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
                if (nr) {
index 55f3ba6a831ca194e2d8405dbf7caa60fbd81dfc..0493272a7668ed0eb0104284e572503855645ca8 100644 (file)
@@ -3758,15 +3758,43 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
                goto drop_write;
        }
 
-       down_write(&fs_info->subvol_sem);
-
        switch (sa->cmd) {
        case BTRFS_QUOTA_CTL_ENABLE:
        case BTRFS_QUOTA_CTL_ENABLE_SIMPLE_QUOTA:
+               down_write(&fs_info->subvol_sem);
                ret = btrfs_quota_enable(fs_info, sa);
+               up_write(&fs_info->subvol_sem);
                break;
        case BTRFS_QUOTA_CTL_DISABLE:
+               /*
+                * Lock the cleaner mutex to prevent races with concurrent
+                * relocation, because relocation may be building backrefs for
+                * blocks of the quota root while we are deleting the root. This
+                * is like dropping fs roots of deleted snapshots/subvolumes, we
+                * need the same protection.
+                *
+                * This also prevents races between concurrent tasks trying to
+                * disable quotas, because we will unlock and relock
+                * qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+                *
+                * We take this here because we have the dependency of
+                *
+                * inode_lock -> subvol_sem
+                *
+                * because of rename.  With relocation we can prealloc extents,
+                * so that makes the dependency chain
+                *
+                * cleaner_mutex -> inode_lock -> subvol_sem
+                *
+                * so we must take the cleaner_mutex here before we take the
+                * subvol_sem.  The deadlock can't actually happen, but this
+                * quiets lockdep.
+                */
+               mutex_lock(&fs_info->cleaner_mutex);
+               down_write(&fs_info->subvol_sem);
                ret = btrfs_quota_disable(fs_info);
+               up_write(&fs_info->subvol_sem);
+               mutex_unlock(&fs_info->cleaner_mutex);
                break;
        default:
                ret = -EINVAL;
@@ -3774,7 +3802,6 @@ static long btrfs_ioctl_quota_ctl(struct file *file, void __user *arg)
        }
 
        kfree(sa);
-       up_write(&fs_info->subvol_sem);
 drop_write:
        mnt_drop_write_file(file);
        return ret;
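
As the new comment above spells out, the quota-disable path now takes cleaner_mutex before subvol_sem so the cleaner_mutex -> inode_lock -> subvol_sem chain stays consistent for lockdep. The tiny pthread sketch below shows the same rule of acquiring both locks in one fixed order; the lock names are illustrative and a plain mutex stands in for the rwsem.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cleaner_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t subvol_sem    = PTHREAD_MUTEX_INITIALIZER;	/* rwsem stand-in */

/* Every path needing both locks takes cleaner_mutex first, subvol_sem second. */
static void quota_disable_locked(void)
{
	pthread_mutex_lock(&cleaner_mutex);
	pthread_mutex_lock(&subvol_sem);

	puts("disabling quotas with a consistent lock order");

	pthread_mutex_unlock(&subvol_sem);
	pthread_mutex_unlock(&cleaner_mutex);
}

int main(void)
{
	quota_disable_locked();
	return 0;
}
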
index b749ba45da2ba2b13d75695e882ab343dda9439c..c2a42bcde98e0ee4edc4ed171f44800cf53d7284 100644 (file)
@@ -1188,6 +1188,7 @@ struct btrfs_ordered_extent *btrfs_split_ordered_extent(
        ordered->disk_bytenr += len;
        ordered->num_bytes -= len;
        ordered->disk_num_bytes -= len;
+       ordered->ram_bytes -= len;
 
        if (test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags)) {
                ASSERT(ordered->bytes_left == 0);
index cf8820ce7aa2979920c6daafc1071c26571ecee6..364acc9bbe730c131a3e4f9b6c8387e6645e300a 100644 (file)
@@ -1342,16 +1342,10 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        lockdep_assert_held_write(&fs_info->subvol_sem);
 
        /*
-        * Lock the cleaner mutex to prevent races with concurrent relocation,
-        * because relocation may be building backrefs for blocks of the quota
-        * root while we are deleting the root. This is like dropping fs roots
-        * of deleted snapshots/subvolumes, we need the same protection.
-        *
-        * This also prevents races between concurrent tasks trying to disable
-        * quotas, because we will unlock and relock qgroup_ioctl_lock across
-        * BTRFS_FS_QUOTA_ENABLED changes.
+        * Relocation will mess with backrefs, so make sure we have the
+        * cleaner_mutex held to protect us from relocate.
         */
-       mutex_lock(&fs_info->cleaner_mutex);
+       lockdep_assert_held(&fs_info->cleaner_mutex);
 
        mutex_lock(&fs_info->qgroup_ioctl_lock);
        if (!fs_info->quota_root)
@@ -1373,9 +1367,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
        clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
        btrfs_qgroup_wait_for_completion(fs_info, false);
 
+       /*
+        * We have nothing held here and no trans handle, just return the error
+        * if there is one.
+        */
        ret = flush_reservations(fs_info);
        if (ret)
-               goto out_unlock_cleaner;
+               return ret;
 
        /*
         * 1 For the root item
@@ -1439,9 +1437,6 @@ out:
                btrfs_end_transaction(trans);
        else if (trans)
                ret = btrfs_commit_transaction(trans);
-out_unlock_cleaner:
-       mutex_unlock(&fs_info->cleaner_mutex);
-
        return ret;
 }
 
index f15591f3e54fa4cd7e92103e17b0ae74eb1a54f9..ef6bd2f4251b523ecfa4e0c31fc3c7eb0d26d332 100644 (file)
@@ -3455,6 +3455,7 @@ again:
                         * alignment and size).
                         */
                        ret = -EUCLEAN;
+                       mutex_unlock(&fs_info->reclaim_bgs_lock);
                        goto error;
                }
 
index 8aff1a724805a67ad3733fbce2127a39e563e01d..62da538d91cbd47cea44f4d021096c6a3a654908 100644 (file)
@@ -151,7 +151,7 @@ static int erofs_fscache_read_io_async(struct fscache_cookie *cookie,
                if (WARN_ON(len == 0))
                        source = NETFS_INVALID_READ;
                if (source != NETFS_READ_FROM_CACHE) {
-                       erofs_err(NULL, "prepare_read failed (source %d)", source);
+                       erofs_err(NULL, "prepare_ondemand_read failed (source %d)", source);
                        return -EIO;
                }
 
index 39c67119f43bfd762aa204a1edcaa77916f5fb76..d28ccfc0352b1ae4728fb25fdf8672bd0ee81789 100644 (file)
@@ -84,13 +84,6 @@ struct erofs_dev_context {
        bool flatdev;
 };
 
-struct erofs_fs_context {
-       struct erofs_mount_opts opt;
-       struct erofs_dev_context *devs;
-       char *fsid;
-       char *domain_id;
-};
-
 /* all filesystem-wide lz4 configurations */
 struct erofs_sb_lz4_info {
        /* # of pages needed for EROFS lz4 rolling decompression */
index c0eb139adb07a8ce852edd56370a79e5760036d9..30b49b2eee53409a0b4b293a4ff579eceb2d5087 100644 (file)
@@ -370,18 +370,18 @@ out:
        return ret;
 }
 
-static void erofs_default_options(struct erofs_fs_context *ctx)
+static void erofs_default_options(struct erofs_sb_info *sbi)
 {
 #ifdef CONFIG_EROFS_FS_ZIP
-       ctx->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
-       ctx->opt.max_sync_decompress_pages = 3;
-       ctx->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
+       sbi->opt.cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+       sbi->opt.max_sync_decompress_pages = 3;
+       sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_AUTO;
 #endif
 #ifdef CONFIG_EROFS_FS_XATTR
-       set_opt(&ctx->opt, XATTR_USER);
+       set_opt(&sbi->opt, XATTR_USER);
 #endif
 #ifdef CONFIG_EROFS_FS_POSIX_ACL
-       set_opt(&ctx->opt, POSIX_ACL);
+       set_opt(&sbi->opt, POSIX_ACL);
 #endif
 }
 
@@ -426,16 +426,16 @@ static const struct fs_parameter_spec erofs_fs_parameters[] = {
 static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
 {
 #ifdef CONFIG_FS_DAX
-       struct erofs_fs_context *ctx = fc->fs_private;
+       struct erofs_sb_info *sbi = fc->s_fs_info;
 
        switch (mode) {
        case EROFS_MOUNT_DAX_ALWAYS:
-               set_opt(&ctx->opt, DAX_ALWAYS);
-               clear_opt(&ctx->opt, DAX_NEVER);
+               set_opt(&sbi->opt, DAX_ALWAYS);
+               clear_opt(&sbi->opt, DAX_NEVER);
                return true;
        case EROFS_MOUNT_DAX_NEVER:
-               set_opt(&ctx->opt, DAX_NEVER);
-               clear_opt(&ctx->opt, DAX_ALWAYS);
+               set_opt(&sbi->opt, DAX_NEVER);
+               clear_opt(&sbi->opt, DAX_ALWAYS);
                return true;
        default:
                DBG_BUGON(1);
@@ -450,7 +450,7 @@ static bool erofs_fc_set_dax_mode(struct fs_context *fc, unsigned int mode)
 static int erofs_fc_parse_param(struct fs_context *fc,
                                struct fs_parameter *param)
 {
-       struct erofs_fs_context *ctx = fc->fs_private;
+       struct erofs_sb_info *sbi = fc->s_fs_info;
        struct fs_parse_result result;
        struct erofs_device_info *dif;
        int opt, ret;
@@ -463,9 +463,9 @@ static int erofs_fc_parse_param(struct fs_context *fc,
        case Opt_user_xattr:
 #ifdef CONFIG_EROFS_FS_XATTR
                if (result.boolean)
-                       set_opt(&ctx->opt, XATTR_USER);
+                       set_opt(&sbi->opt, XATTR_USER);
                else
-                       clear_opt(&ctx->opt, XATTR_USER);
+                       clear_opt(&sbi->opt, XATTR_USER);
 #else
                errorfc(fc, "{,no}user_xattr options not supported");
 #endif
@@ -473,16 +473,16 @@ static int erofs_fc_parse_param(struct fs_context *fc,
        case Opt_acl:
 #ifdef CONFIG_EROFS_FS_POSIX_ACL
                if (result.boolean)
-                       set_opt(&ctx->opt, POSIX_ACL);
+                       set_opt(&sbi->opt, POSIX_ACL);
                else
-                       clear_opt(&ctx->opt, POSIX_ACL);
+                       clear_opt(&sbi->opt, POSIX_ACL);
 #else
                errorfc(fc, "{,no}acl options not supported");
 #endif
                break;
        case Opt_cache_strategy:
 #ifdef CONFIG_EROFS_FS_ZIP
-               ctx->opt.cache_strategy = result.uint_32;
+               sbi->opt.cache_strategy = result.uint_32;
 #else
                errorfc(fc, "compression not supported, cache_strategy ignored");
 #endif
@@ -504,27 +504,27 @@ static int erofs_fc_parse_param(struct fs_context *fc,
                        kfree(dif);
                        return -ENOMEM;
                }
-               down_write(&ctx->devs->rwsem);
-               ret = idr_alloc(&ctx->devs->tree, dif, 0, 0, GFP_KERNEL);
-               up_write(&ctx->devs->rwsem);
+               down_write(&sbi->devs->rwsem);
+               ret = idr_alloc(&sbi->devs->tree, dif, 0, 0, GFP_KERNEL);
+               up_write(&sbi->devs->rwsem);
                if (ret < 0) {
                        kfree(dif->path);
                        kfree(dif);
                        return ret;
                }
-               ++ctx->devs->extra_devices;
+               ++sbi->devs->extra_devices;
                break;
 #ifdef CONFIG_EROFS_FS_ONDEMAND
        case Opt_fsid:
-               kfree(ctx->fsid);
-               ctx->fsid = kstrdup(param->string, GFP_KERNEL);
-               if (!ctx->fsid)
+               kfree(sbi->fsid);
+               sbi->fsid = kstrdup(param->string, GFP_KERNEL);
+               if (!sbi->fsid)
                        return -ENOMEM;
                break;
        case Opt_domain_id:
-               kfree(ctx->domain_id);
-               ctx->domain_id = kstrdup(param->string, GFP_KERNEL);
-               if (!ctx->domain_id)
+               kfree(sbi->domain_id);
+               sbi->domain_id = kstrdup(param->string, GFP_KERNEL);
+               if (!sbi->domain_id)
                        return -ENOMEM;
                break;
 #else
@@ -581,8 +581,7 @@ static const struct export_operations erofs_export_ops = {
 static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
 {
        struct inode *inode;
-       struct erofs_sb_info *sbi;
-       struct erofs_fs_context *ctx = fc->fs_private;
+       struct erofs_sb_info *sbi = EROFS_SB(sb);
        int err;
 
        sb->s_magic = EROFS_SUPER_MAGIC;
@@ -590,19 +589,6 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_op = &erofs_sops;
 
-       sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
-       if (!sbi)
-               return -ENOMEM;
-
-       sb->s_fs_info = sbi;
-       sbi->opt = ctx->opt;
-       sbi->devs = ctx->devs;
-       ctx->devs = NULL;
-       sbi->fsid = ctx->fsid;
-       ctx->fsid = NULL;
-       sbi->domain_id = ctx->domain_id;
-       ctx->domain_id = NULL;
-
        sbi->blkszbits = PAGE_SHIFT;
        if (erofs_is_fscache_mode(sb)) {
                sb->s_blocksize = PAGE_SIZE;
@@ -706,9 +692,9 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
 
 static int erofs_fc_get_tree(struct fs_context *fc)
 {
-       struct erofs_fs_context *ctx = fc->fs_private;
+       struct erofs_sb_info *sbi = fc->s_fs_info;
 
-       if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && ctx->fsid)
+       if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
                return get_tree_nodev(fc, erofs_fc_fill_super);
 
        return get_tree_bdev(fc, erofs_fc_fill_super);
@@ -718,19 +704,19 @@ static int erofs_fc_reconfigure(struct fs_context *fc)
 {
        struct super_block *sb = fc->root->d_sb;
        struct erofs_sb_info *sbi = EROFS_SB(sb);
-       struct erofs_fs_context *ctx = fc->fs_private;
+       struct erofs_sb_info *new_sbi = fc->s_fs_info;
 
        DBG_BUGON(!sb_rdonly(sb));
 
-       if (ctx->fsid || ctx->domain_id)
+       if (new_sbi->fsid || new_sbi->domain_id)
                erofs_info(sb, "ignoring reconfiguration for fsid|domain_id.");
 
-       if (test_opt(&ctx->opt, POSIX_ACL))
+       if (test_opt(&new_sbi->opt, POSIX_ACL))
                fc->sb_flags |= SB_POSIXACL;
        else
                fc->sb_flags &= ~SB_POSIXACL;
 
-       sbi->opt = ctx->opt;
+       sbi->opt = new_sbi->opt;
 
        fc->sb_flags |= SB_RDONLY;
        return 0;
@@ -761,12 +747,15 @@ static void erofs_free_dev_context(struct erofs_dev_context *devs)
 
 static void erofs_fc_free(struct fs_context *fc)
 {
-       struct erofs_fs_context *ctx = fc->fs_private;
+       struct erofs_sb_info *sbi = fc->s_fs_info;
 
-       erofs_free_dev_context(ctx->devs);
-       kfree(ctx->fsid);
-       kfree(ctx->domain_id);
-       kfree(ctx);
+       if (!sbi)
+               return;
+
+       erofs_free_dev_context(sbi->devs);
+       kfree(sbi->fsid);
+       kfree(sbi->domain_id);
+       kfree(sbi);
 }
 
 static const struct fs_context_operations erofs_context_ops = {
@@ -778,38 +767,35 @@ static const struct fs_context_operations erofs_context_ops = {
 
 static int erofs_init_fs_context(struct fs_context *fc)
 {
-       struct erofs_fs_context *ctx;
+       struct erofs_sb_info *sbi;
 
-       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-       if (!ctx)
+       sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+       if (!sbi)
                return -ENOMEM;
-       ctx->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
-       if (!ctx->devs) {
-               kfree(ctx);
+
+       sbi->devs = kzalloc(sizeof(struct erofs_dev_context), GFP_KERNEL);
+       if (!sbi->devs) {
+               kfree(sbi);
                return -ENOMEM;
        }
-       fc->fs_private = ctx;
+       fc->s_fs_info = sbi;
 
-       idr_init(&ctx->devs->tree);
-       init_rwsem(&ctx->devs->rwsem);
-       erofs_default_options(ctx);
+       idr_init(&sbi->devs->tree);
+       init_rwsem(&sbi->devs->rwsem);
+       erofs_default_options(sbi);
        fc->ops = &erofs_context_ops;
        return 0;
 }
 
 static void erofs_kill_sb(struct super_block *sb)
 {
-       struct erofs_sb_info *sbi;
+       struct erofs_sb_info *sbi = EROFS_SB(sb);
 
-       if (erofs_is_fscache_mode(sb))
+       if (IS_ENABLED(CONFIG_EROFS_FS_ONDEMAND) && sbi->fsid)
                kill_anon_super(sb);
        else
                kill_block_super(sb);
 
-       sbi = EROFS_SB(sb);
-       if (!sbi)
-               return;
-
        erofs_free_dev_context(sbi->devs);
        fs_put_dax(sbi->dax_dev, NULL);
        erofs_fscache_unregister_fs(sb);
index c709c296ea9a49e0ccecdae975a5287aea5759fe..acef52ecb1bb7ee01ff448f0c066d4bc1c9df932 100644 (file)
@@ -2429,7 +2429,12 @@ static int nfs_net_init(struct net *net)
        struct nfs_net *nn = net_generic(net, nfs_net_id);
 
        nfs_clients_init(net);
-       rpc_proc_register(net, &nn->rpcstats);
+
+       if (!rpc_proc_register(net, &nn->rpcstats)) {
+               nfs_clients_exit(net);
+               return -ENOMEM;
+       }
+
        return nfs_fs_proc_net_init(net);
 }
 
index 1955481832e03796170ea8f80361bc25cc452ca6..a644460f3a5e7a7e3f8574ab922fef1e94ccb753 100644 (file)
@@ -3515,6 +3515,7 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
        args.exp = exp;
        args.dentry = dentry;
        args.ignore_crossmnt = (ignore_crossmnt != 0);
+       args.acl = NULL;
 
        /*
         * Make a local copy of the attribute bitmap that can be modified.
@@ -3573,7 +3574,6 @@ nfsd4_encode_fattr4(struct svc_rqst *rqstp, struct xdr_stream *xdr,
        } else
                args.fhp = fhp;
 
-       args.acl = NULL;
        if (attrmask[0] & FATTR4_WORD0_ACL) {
                err = nfsd4_get_nfs4_acl(rqstp, dentry, &args.acl);
                if (err == -EOPNOTSUPP)
index c99bc3df2d28e35f73b21d62ca50f4a53c515e1b..219ee7a76874439e9a2ecda7a402a7a9e741e0be 100644 (file)
@@ -963,6 +963,7 @@ bool bpf_jit_supports_far_kfunc_call(void);
 bool bpf_jit_supports_exceptions(void);
 bool bpf_jit_supports_ptr_xchg(void);
 bool bpf_jit_supports_arena(void);
+u64 bpf_arch_uaddress_limit(void);
 void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
 bool bpf_helper_changes_pkt_data(void *func);
 
index b743241cfb7caa22da0e12098da3ea7dead08a9b..d470303b1bbbbe9d3e468cab99cdb3e72067d66f 100644 (file)
@@ -1230,6 +1230,7 @@ int regmap_multi_reg_write_bypassed(struct regmap *map,
 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
                           const void *val, size_t val_len);
 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
+int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val);
 int regmap_raw_read(struct regmap *map, unsigned int reg,
                    void *val, size_t val_len);
 int regmap_noinc_read(struct regmap *map, unsigned int reg,
@@ -1739,6 +1740,13 @@ static inline int regmap_read(struct regmap *map, unsigned int reg,
        return -EINVAL;
 }
 
+static inline int regmap_read_bypassed(struct regmap *map, unsigned int reg,
+                                      unsigned int *val)
+{
+       WARN_ONCE(1, "regmap API is disabled");
+       return -EINVAL;
+}
+
 static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
                                  void *val, size_t val_len)
 {
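For orientation, a minimal usage sketch of the regmap_read_bypassed() helper declared above; the device handle (priv->regmap) and register name (EXAMPLE_STATUS_REG) are invented for illustration and do not come from this patch:

	/* Hedged sketch: read one register through the new helper, which by its
	 * name is assumed to read around the regmap cache state. Error handling
	 * follows the usual regmap convention of 0 on success, negative errno
	 * on failure.
	 */
	unsigned int status;
	int ret;

	ret = regmap_read_bypassed(priv->regmap, EXAMPLE_STATUS_REG, &status);
	if (ret < 0)
		return ret;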
index 4660582a33022fc15946a811105318199ccaca42..ed180ca419dad94d6e7db7eae820a26f8dcc5e48 100644 (file)
@@ -320,13 +320,13 @@ devm_regulator_get_exclusive(struct device *dev, const char *id)
 
 static inline int devm_regulator_get_enable(struct device *dev, const char *id)
 {
-       return -ENODEV;
+       return 0;
 }
 
 static inline int devm_regulator_get_enable_optional(struct device *dev,
                                                     const char *id)
 {
-       return -ENODEV;
+       return 0;
 }
 
 static inline struct regulator *__must_check
index e65ec3fd27998a5b82fc2c4597c575125e653056..a509caf823d6188070d17da6c7724f6d07eb0997 100644 (file)
@@ -461,10 +461,12 @@ static inline void sk_psock_put(struct sock *sk, struct sk_psock *psock)
 
 static inline void sk_psock_data_ready(struct sock *sk, struct sk_psock *psock)
 {
+       read_lock_bh(&sk->sk_callback_lock);
        if (psock->saved_data_ready)
                psock->saved_data_ready(sk);
        else
                sk->sk_data_ready(sk);
+       read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static inline void psock_set_prog(struct bpf_prog **pprog,
index 50f1e403dbbb3805277d74ff80b504677e9a7aa0..c1d4ca0463a178736d2416e172784e35e333e126 100644 (file)
@@ -87,6 +87,15 @@ struct napi_gro_cb {
 
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
+
+       /* L3 offsets */
+       union {
+               struct {
+                       u16 network_offset;
+                       u16 inner_network_offset;
+               };
+               u16 network_offsets[2];
+       };
 };
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
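A small sketch of how the anonymous union added above aliases the two new offsets; only the field names and the NAPI_GRO_CB() macro come from this hunk, the surrounding skb helpers are assumed from skbuff.h:

	/* Illustration only: the named fields and the network_offsets[] array
	 * share storage, so slot 0 is the outer L3 offset and slot 1 the inner
	 * one.
	 */
	struct napi_gro_cb *cb = NAPI_GRO_CB(skb);

	cb->network_offset = skb_network_offset(skb);              /* slot 0 */
	cb->inner_network_offset = skb_inner_network_offset(skb);  /* slot 1 */
	/* cb->network_offsets[0] == cb->network_offset,
	 * cb->network_offsets[1] == cb->inner_network_offset
	 */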
index e0629699b56338a76b69616dcc2a79dece2e51f4..1a3c6f66f6205597cb46b3c7cc826802fc35c740 100644 (file)
@@ -267,6 +267,7 @@ struct cs35l56_base {
        bool fw_patched;
        bool secured;
        bool can_hibernate;
+       bool fw_owns_asp1;
        bool cal_data_valid;
        s8 cal_index;
        struct cirrus_amp_cal_data cal_data;
@@ -283,6 +284,7 @@ extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
 extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
 
 int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_init_asp1_regs_for_driver_control(struct cs35l56_base *cs35l56_base);
 int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base);
 int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
 int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
index 1af9e68193920d062f3c9cce40eba41dc8d74292..234b5baea69c8e6154c3c048fe002bf99337e245 100644 (file)
@@ -1684,8 +1684,8 @@ struct snd_emu1010 {
        unsigned int clock_fallback;
        unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
        unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
-       struct work_struct firmware_work;
-       struct work_struct clock_work;
+       struct work_struct work;
+       struct mutex lock;
 };
 
 struct snd_emu10k1 {
@@ -1834,6 +1834,9 @@ unsigned int snd_emu10k1_ptr20_read(struct snd_emu10k1 * emu, unsigned int reg,
 void snd_emu10k1_ptr20_write(struct snd_emu10k1 *emu, unsigned int reg, unsigned int chn, unsigned int data);
 int snd_emu10k1_spi_write(struct snd_emu10k1 * emu, unsigned int data);
 int snd_emu10k1_i2c_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
+static inline void snd_emu1010_fpga_lock(struct snd_emu10k1 *emu) { mutex_lock(&emu->emu1010.lock); };
+static inline void snd_emu1010_fpga_unlock(struct snd_emu10k1 *emu) { mutex_unlock(&emu->emu1010.lock); };
+void snd_emu1010_fpga_write_lock(struct snd_emu10k1 *emu, u32 reg, u32 value);
 void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value);
 void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value);
 void snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 *emu, u32 dst, u32 src);
index c5a9fcd2d622819cc1f5b0f5cfd772514ee5f423..29b2cd00df2ccf2b991b9b78f4514b76c87ee354 100644 (file)
@@ -19,7 +19,7 @@ int main(void)
        DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
        DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
 #ifdef CONFIG_SMP
-       DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
+       DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
 #endif
        DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
 #ifdef CONFIG_LRU_GEN
index 696bc55de8e82ea9358ede9c222b4927871e60be..1ea5ce5bb59933ac4449567c52f1981c8df8a139 100644 (file)
@@ -2942,6 +2942,15 @@ bool __weak bpf_jit_supports_arena(void)
        return false;
 }
 
+u64 __weak bpf_arch_uaddress_limit(void)
+{
+#if defined(CONFIG_64BIT) && defined(CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE)
+       return TASK_SIZE;
+#else
+       return 0;
+#endif
+}
+
 /* Return TRUE if the JIT backend satisfies the following two conditions:
  * 1) JIT backend supports atomic_xchg() on pointer-sized words.
  * 2) Under the specific arch, the implementation of xchg() is the same
index 98188379d5c77d79d3a5e764659ed4426f931d14..cb7ad1f795e18b1abec57dc5e40f2d852416bb04 100644 (file)
@@ -18289,8 +18289,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
                        f = fdget(fd);
                        map = __bpf_map_get(f);
                        if (IS_ERR(map)) {
-                               verbose(env, "fd %d is not pointing to valid bpf_map\n",
-                                       insn[0].imm);
+                               verbose(env, "fd %d is not pointing to valid bpf_map\n", fd);
                                return PTR_ERR(map);
                        }
 
@@ -19676,6 +19675,36 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
                        goto next_insn;
                }
 
+               /* Make it impossible to de-reference a userspace address */
+               if (BPF_CLASS(insn->code) == BPF_LDX &&
+                   (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+                    BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) {
+                       struct bpf_insn *patch = &insn_buf[0];
+                       u64 uaddress_limit = bpf_arch_uaddress_limit();
+
+                       if (!uaddress_limit)
+                               goto next_insn;
+
+                       *patch++ = BPF_MOV64_REG(BPF_REG_AX, insn->src_reg);
+                       if (insn->off)
+                               *patch++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_AX, insn->off);
+                       *patch++ = BPF_ALU64_IMM(BPF_RSH, BPF_REG_AX, 32);
+                       *patch++ = BPF_JMP_IMM(BPF_JLE, BPF_REG_AX, uaddress_limit >> 32, 2);
+                       *patch++ = *insn;
+                       *patch++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
+                       *patch++ = BPF_MOV64_IMM(insn->dst_reg, 0);
+
+                       cnt = patch - insn_buf;
+                       new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+                       if (!new_prog)
+                               return -ENOMEM;
+
+                       delta    += cnt - 1;
+                       env->prog = prog = new_prog;
+                       insn      = new_prog->insnsi + i + delta;
+                       goto next_insn;
+               }
+
                /* Implement LD_ABS and LD_IND with a rewrite, if supported by the program type. */
                if (BPF_CLASS(insn->code) == BPF_LD &&
                    (BPF_MODE(insn->code) == BPF_ABS ||
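To make the spliced-in instruction sequence above easier to follow, here is a C rendering of the check it performs; this is an illustrative assumption about the emitted logic (the helper name and types are invented), not code from the patch:

	#include <stdint.h>

	/* If the upper 32 bits of the probed address are at or below those of
	 * the userspace limit, the load is elided and the destination is
	 * zeroed; otherwise the original PROBE_MEM load runs as before.
	 */
	static uint64_t probe_mem_guarded_load(uint64_t src, int16_t off,
					       uint64_t uaddress_limit)
	{
		uint64_t ax = src + off;	/* BPF_REG_AX = src + insn->off */

		if ((ax >> 32) <= (uaddress_limit >> 32))
			return 0;		/* looks like a user address */
		return *(const volatile uint64_t *)(uintptr_t)ax; /* original load */
	}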
index 0066c8f6c15442641889c87995410ac82d078423..d2dbe099286b95ca5f0b745e577af0e6ed297062 100644 (file)
@@ -1277,8 +1277,12 @@ static bool kick_pool(struct worker_pool *pool)
            !cpumask_test_cpu(p->wake_cpu, pool->attrs->__pod_cpumask)) {
                struct work_struct *work = list_first_entry(&pool->worklist,
                                                struct work_struct, entry);
-               p->wake_cpu = cpumask_any_distribute(pool->attrs->__pod_cpumask);
-               get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+               int wake_cpu = cpumask_any_and_distribute(pool->attrs->__pod_cpumask,
+                                                         cpu_online_mask);
+               if (wake_cpu < nr_cpu_ids) {
+                       p->wake_cpu = wake_cpu;
+                       get_work_pwq(work)->stats[PWQ_STAT_REPATRIATED]++;
+               }
        }
 #endif
        wake_up_process(p);
@@ -1594,6 +1598,15 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
        if (off_cpu >= 0)
                total_cpus--;
 
+       /* If all CPUs of the wq get offline, use the default values */
+       if (unlikely(!total_cpus)) {
+               for_each_node(node)
+                       wq_node_nr_active(wq, node)->max = min_active;
+
+               wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
+               return;
+       }
+
        for_each_node(node) {
                int node_cpus;
 
@@ -1606,7 +1619,7 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
                              min_active, max_active);
        }
 
-       wq_node_nr_active(wq, NUMA_NO_NODE)->max = min_active;
+       wq_node_nr_active(wq, NUMA_NO_NODE)->max = max_active;
 }
 
 /**
index c63a5fbf1f1c2b45b67603886ef9e6f07510f61d..291185f54ee4c217d04b8040b339dc1ad47f9cf6 100644 (file)
@@ -375,7 +375,7 @@ config DEBUG_INFO_SPLIT
          Incompatible with older versions of ccache.
 
 config DEBUG_INFO_BTF
-       bool "Generate BTF typeinfo"
+       bool "Generate BTF type information"
        depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
        depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
        depends on BPF_SYSCALL
@@ -408,7 +408,8 @@ config PAHOLE_HAS_LANG_EXCLUDE
          using DEBUG_INFO_BTF_MODULES.
 
 config DEBUG_INFO_BTF_MODULES
-       def_bool y
+       bool "Generate BTF type information for kernel modules"
+       default y
        depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
        help
          Generate compact split BTF type information for kernel modules.
index 68b45c82c37a6981bb72cc79ab4c4fcac8e2e489..7bc2220fea805855fadd0a1ed347ea2b9195e9e9 100644 (file)
@@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
        do {
                res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
                                             extraction_flags, &off);
-               if (res < 0)
+               if (res <= 0)
                        goto failed;
 
                len = res;
index f001582345052f8c26e008058ae5f721f8bc224d..9404dd551dfd2850117d4edf9b7fd25e3ba84322 100644 (file)
@@ -478,6 +478,8 @@ static struct sk_buff *vlan_gro_receive(struct list_head *head,
        if (unlikely(!vhdr))
                goto out;
 
+       NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = hlen;
+
        type = vhdr->h_vlan_encapsulated_proto;
 
        ptype = gro_find_receive_by_type(type);
index 7431f89e897b9549a0c35d9431e36b6de2e80022..d7c35f55bd69fb5e7d3add82a4ce7c48c83b8332 100644 (file)
@@ -266,7 +266,7 @@ static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
        if (skb->dev == p->dev && ether_addr_equal(src, addr))
                return;
 
-       skb = skb_copy(skb, GFP_ATOMIC);
+       skb = pskb_copy(skb, GFP_ATOMIC);
        if (!skb) {
                DEV_STATS_INC(dev, tx_dropped);
                return;
index 8adf95765cdd967a15b2661dfb454db0ccf350b0..ae5254f712c94b0c656fda5fe065ee635252a67e 100644 (file)
@@ -4360,10 +4360,12 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
        enum bpf_map_type map_type = ri->map_type;
        void *fwd = ri->tgt_value;
        u32 map_id = ri->map_id;
+       u32 flags = ri->flags;
        struct bpf_map *map;
        int err;
 
        ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+       ri->flags = 0;
        ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
        if (unlikely(!xdpf)) {
@@ -4375,11 +4377,20 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri,
        case BPF_MAP_TYPE_DEVMAP:
                fallthrough;
        case BPF_MAP_TYPE_DEVMAP_HASH:
-               map = READ_ONCE(ri->map);
-               if (unlikely(map)) {
+               if (unlikely(flags & BPF_F_BROADCAST)) {
+                       map = READ_ONCE(ri->map);
+
+                       /* The map pointer is cleared when the map is being torn
+                        * down by bpf_clear_redirect_map()
+                        */
+                       if (unlikely(!map)) {
+                               err = -ENOENT;
+                               break;
+                       }
+
                        WRITE_ONCE(ri->map, NULL);
                        err = dev_map_enqueue_multi(xdpf, dev, map,
-                                                   ri->flags & BPF_F_EXCLUDE_INGRESS);
+                                                   flags & BPF_F_EXCLUDE_INGRESS);
                } else {
                        err = dev_map_enqueue(fwd, xdpf, dev);
                }
@@ -4442,9 +4453,9 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect_frame);
 static int xdp_do_generic_redirect_map(struct net_device *dev,
                                       struct sk_buff *skb,
                                       struct xdp_buff *xdp,
-                                      struct bpf_prog *xdp_prog,
-                                      void *fwd,
-                                      enum bpf_map_type map_type, u32 map_id)
+                                      struct bpf_prog *xdp_prog, void *fwd,
+                                      enum bpf_map_type map_type, u32 map_id,
+                                      u32 flags)
 {
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        struct bpf_map *map;
@@ -4454,11 +4465,20 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
        case BPF_MAP_TYPE_DEVMAP:
                fallthrough;
        case BPF_MAP_TYPE_DEVMAP_HASH:
-               map = READ_ONCE(ri->map);
-               if (unlikely(map)) {
+               if (unlikely(flags & BPF_F_BROADCAST)) {
+                       map = READ_ONCE(ri->map);
+
+                       /* The map pointer is cleared when the map is being torn
+                        * down by bpf_clear_redirect_map()
+                        */
+                       if (unlikely(!map)) {
+                               err = -ENOENT;
+                               break;
+                       }
+
                        WRITE_ONCE(ri->map, NULL);
                        err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
-                                                    ri->flags & BPF_F_EXCLUDE_INGRESS);
+                                                    flags & BPF_F_EXCLUDE_INGRESS);
                } else {
                        err = dev_map_generic_redirect(fwd, skb, xdp_prog);
                }
@@ -4495,9 +4515,11 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
        enum bpf_map_type map_type = ri->map_type;
        void *fwd = ri->tgt_value;
        u32 map_id = ri->map_id;
+       u32 flags = ri->flags;
        int err;
 
        ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+       ri->flags = 0;
        ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
        if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
@@ -4517,7 +4539,7 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
                return 0;
        }
 
-       return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
+       return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id, flags);
 err:
        _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
        return err;
index 83f35d99a682c21dae11683fec72074a898fbac2..c7901253a1a8fc1e9425add77014e15b363a1623 100644 (file)
@@ -371,6 +371,7 @@ static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
        const skb_frag_t *frag0;
        unsigned int headlen;
 
+       NAPI_GRO_CB(skb)->network_offset = 0;
        NAPI_GRO_CB(skb)->data_offset = 0;
        headlen = skb_headlen(skb);
        NAPI_GRO_CB(skb)->frag0 = skb->data;
index b99127712e6704dda41636014db46096da171bfd..4096e679f61c76041223b894e3ee4f9a8a051000 100644 (file)
@@ -2123,11 +2123,17 @@ static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
 
 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 {
-       int headerlen = skb_headroom(skb);
-       unsigned int size = skb_end_offset(skb) + skb->data_len;
-       struct sk_buff *n = __alloc_skb(size, gfp_mask,
-                                       skb_alloc_rx_flag(skb), NUMA_NO_NODE);
+       struct sk_buff *n;
+       unsigned int size;
+       int headerlen;
+
+       if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+               return NULL;
 
+       headerlen = skb_headroom(skb);
+       size = skb_end_offset(skb) + skb->data_len;
+       n = __alloc_skb(size, gfp_mask,
+                       skb_alloc_rx_flag(skb), NUMA_NO_NODE);
        if (!n)
                return NULL;
 
@@ -2455,12 +2461,17 @@ struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
        /*
         *      Allocate the copy buffer
         */
-       struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
-                                       gfp_mask, skb_alloc_rx_flag(skb),
-                                       NUMA_NO_NODE);
-       int oldheadroom = skb_headroom(skb);
        int head_copy_len, head_copy_off;
+       struct sk_buff *n;
+       int oldheadroom;
+
+       if (WARN_ON_ONCE(skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST))
+               return NULL;
 
+       oldheadroom = skb_headroom(skb);
+       n = __alloc_skb(newheadroom + skb->len + newtailroom,
+                       gfp_mask, skb_alloc_rx_flag(skb),
+                       NUMA_NO_NODE);
        if (!n)
                return NULL;
 
index 4d75ef9d24bfa7cbffe642448f5116ac0b943ed2..fd20aae30be23cc2241f081a1d56215f9693cab0 100644 (file)
@@ -1226,11 +1226,8 @@ static void sk_psock_verdict_data_ready(struct sock *sk)
 
                rcu_read_lock();
                psock = sk_psock(sk);
-               if (psock) {
-                       read_lock_bh(&sk->sk_callback_lock);
+               if (psock)
                        sk_psock_data_ready(sk, psock);
-                       read_unlock_bh(&sk->sk_callback_lock);
-               }
                rcu_read_unlock();
        }
 }
index 55bd72997b31063b7baad350fdcff40e938aecb8..fafb123f798be346c247dac0f9dc8e498e034349 100644 (file)
@@ -1572,6 +1572,7 @@ struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb)
        /* The above will be needed by the transport layer if there is one
         * immediately following this IP hdr.
         */
+       NAPI_GRO_CB(skb)->inner_network_offset = off;
 
        /* Note : No need to call skb_gro_postpull_rcsum() here,
         * as we already checked checksum over ipv4 header was 0
index 1fe794967211e249016df00dc3c2ae230d71dcff..39229fd0601a11c3f2e58cd1e75db165a443b1a2 100644 (file)
@@ -1473,7 +1473,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
                 * by icmp_hdr(skb)->type.
                 */
                if (sk->sk_type == SOCK_RAW &&
-                   !inet_test_bit(HDRINCL, sk))
+                   !(fl4->flowi4_flags & FLOWI_FLAG_KNOWN_NH))
                        icmp_type = fl4->fl4_icmp_type;
                else
                        icmp_type = icmp_hdr(skb)->type;
index dcb11f22cbf2b437405d1b373dd0ebc37d02c9ec..4cb43401e0e06c5003c268c28bf0882ac4e8b4e0 100644 (file)
@@ -612,6 +612,9 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
                            (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
                           daddr, saddr, 0, 0, sk->sk_uid);
 
+       fl4.fl4_icmp_type = 0;
+       fl4.fl4_icmp_code = 0;
+
        if (!hdrincl) {
                rfv.msg = msg;
                rfv.hlen = 0;
index 420905be5f30c944ff360b349ae29d66104e0286..b32cf2eeeb41d1fc0bbe24e4888301f344ec8488 100644 (file)
@@ -532,7 +532,8 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
 struct sock *udp4_lib_lookup_skb(const struct sk_buff *skb,
                                 __be16 sport, __be16 dport)
 {
-       const struct iphdr *iph = ip_hdr(skb);
+       const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+       const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
        struct net *net = dev_net(skb->dev);
        int iif, sdif;
 
index 3498dd1d0694dc3ddb984177d2ddffb7b8abd0b9..8721fe5beca2bea692eb2cfa454e724e649cea6d 100644 (file)
@@ -471,6 +471,7 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
        struct sk_buff *p;
        unsigned int ulen;
        int ret = 0;
+       int flush;
 
        /* requires non zero csum, for symmetry with GSO */
        if (!uh->check) {
@@ -504,13 +505,22 @@ static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
                        return p;
                }
 
+               flush = NAPI_GRO_CB(p)->flush;
+
+               if (NAPI_GRO_CB(p)->flush_id != 1 ||
+                   NAPI_GRO_CB(p)->count != 1 ||
+                   !NAPI_GRO_CB(p)->is_atomic)
+                       flush |= NAPI_GRO_CB(p)->flush_id;
+               else
+                       NAPI_GRO_CB(p)->is_atomic = false;
+
                /* Terminate the flow on len mismatch or if it grows "too much".
                 * Under small packet flood GRO count could elsewhere grow a lot
                 * leading to excessive truesize values.
                 * On len mismatch merge the first packet shorter than gso_size,
                 * otherwise complete the GRO packet.
                 */
-               if (ulen > ntohs(uh2->len)) {
+               if (ulen > ntohs(uh2->len) || flush) {
                        pp = p;
                } else {
                        if (NAPI_GRO_CB(skb)->is_flist) {
@@ -718,7 +728,8 @@ EXPORT_SYMBOL(udp_gro_complete);
 
 INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
 {
-       const struct iphdr *iph = ip_hdr(skb);
+       const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+       const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
        /* do fraglist only if there is no outer UDP encap (or we already processed it) */
index b41e35af69ea2835aa47d6ca01d9b109d4092462..c8b909a9904f321b91c9cfef46da9bb491c18a7d 100644 (file)
@@ -237,6 +237,7 @@ INDIRECT_CALLABLE_SCOPE struct sk_buff *ipv6_gro_receive(struct list_head *head,
                goto out;
 
        skb_set_network_header(skb, off);
+       NAPI_GRO_CB(skb)->inner_network_offset = off;
 
        flush += ntohs(iph->payload_len) != skb->len - hlen;
 
index 1a4cccdd40c9ca44675cea5f5c2a08724ccb2d75..8f7aa8bac1e7b1eb330e4a6a9086b88e509886be 100644 (file)
@@ -272,7 +272,8 @@ static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
 struct sock *udp6_lib_lookup_skb(const struct sk_buff *skb,
                                 __be16 sport, __be16 dport)
 {
-       const struct ipv6hdr *iph = ipv6_hdr(skb);
+       const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+       const struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + offset);
        struct net *net = dev_net(skb->dev);
        int iif, sdif;
 
index bbd347de00b450bb3ecbbfa41c4dab9d36bb79d9..b41152dd424697a9fc3cef13fbb430de49dcb913 100644 (file)
@@ -164,7 +164,8 @@ flush:
 
 INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
 {
-       const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+       const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
+       const struct ipv6hdr *ipv6h = (struct ipv6hdr *)(skb->data + offset);
        struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
 
        /* do fraglist only if there is no outer UDP encap (or we already processed it) */
index 39e487ccc46881b2ede597672b0e3df63ba8b05a..8ba00ad433c21bfc8f6566bed1360cb8e4cf4944 100644 (file)
@@ -127,6 +127,9 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
        /* checksums verified by L2TP */
        skb->ip_summed = CHECKSUM_NONE;
 
+       /* drop outer flow-hash */
+       skb_clear_hash(skb);
+
        skb_dst_drop(skb);
        nf_reset_ct(skb);
 
index 7e74b812e366ae311f52615e9b304d6fe8b924b8..965eb69dc5de32b0d0ec01e5270b20a4fcb436b3 100644 (file)
@@ -3723,6 +3723,9 @@ static int mptcp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
                mptcp_subflow_early_fallback(msk, subflow);
        }
+
+       WRITE_ONCE(msk->write_seq, subflow->idsn);
+       WRITE_ONCE(msk->snd_nxt, subflow->idsn);
        if (likely(!__mptcp_check_fallback(msk)))
                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVE);
 
index f4a38bd6a7e04f5944f8fa3d7f3cdc43caffed41..bfb7758063f315f4d13a6c17f8b01739933a6cf3 100644 (file)
@@ -77,13 +77,15 @@ EXPORT_SYMBOL_GPL(nsh_pop);
 static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
                                       netdev_features_t features)
 {
+       unsigned int outer_hlen, mac_len, nsh_len;
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        u16 mac_offset = skb->mac_header;
-       unsigned int nsh_len, mac_len;
-       __be16 proto;
+       __be16 outer_proto, proto;
 
        skb_reset_network_header(skb);
 
+       outer_proto = skb->protocol;
+       outer_hlen = skb_mac_header_len(skb);
        mac_len = skb->mac_len;
 
        if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
@@ -113,10 +115,10 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
        }
 
        for (skb = segs; skb; skb = skb->next) {
-               skb->protocol = htons(ETH_P_NSH);
-               __skb_push(skb, nsh_len);
-               skb->mac_header = mac_offset;
-               skb->network_header = skb->mac_header + mac_len;
+               skb->protocol = outer_proto;
+               __skb_push(skb, nsh_len + outer_hlen);
+               skb_reset_mac_header(skb);
+               skb_set_network_header(skb, outer_hlen);
                skb->mac_len = mac_len;
        }
 
index 0af4642aeec4bcba890cfe0723b4ae001b378cf8..1539d315afe74ace265dd7ac32ffc7031d0b2323 100644 (file)
@@ -119,18 +119,13 @@ struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *lo
        switch (srx->transport.family) {
        case AF_INET:
                if (peer->srx.transport.sin.sin_port !=
-                   srx->transport.sin.sin_port ||
-                   peer->srx.transport.sin.sin_addr.s_addr !=
-                   srx->transport.sin.sin_addr.s_addr)
+                   srx->transport.sin.sin_port)
                        goto not_found;
                break;
 #ifdef CONFIG_AF_RXRPC_IPV6
        case AF_INET6:
                if (peer->srx.transport.sin6.sin6_port !=
-                   srx->transport.sin6.sin6_port ||
-                   memcmp(&peer->srx.transport.sin6.sin6_addr,
-                          &srx->transport.sin6.sin6_addr,
-                          sizeof(struct in6_addr)) != 0)
+                   srx->transport.sin6.sin6_port)
                        goto not_found;
                break;
 #endif
index f2701068ed9e4ce48c52848901e983f21de459c7..6716c021a532e184e5e09e50c958e4128d9f97bf 100644 (file)
@@ -19,7 +19,7 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
  */
 static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
 {
-       return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 0, gfp);
+       return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 1, gfp);
 }
 
 static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
index f1a68270862db9715801594e754f48d7a77d822d..48a1475e6b0634cc806641f4d5a134684d1c56cf 100644 (file)
@@ -155,7 +155,7 @@ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t rem
        switch (call->conn->security_level) {
        default:
                space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
-               return rxrpc_alloc_data_txbuf(call, space, 0, gfp);
+               return rxrpc_alloc_data_txbuf(call, space, 1, gfp);
        case RXRPC_SECURITY_AUTH:
                shdr = sizeof(struct rxkad_level1_hdr);
                break;
index e0679658d9de0e62edd777cad0b7e4281cfe2384..c3913d8a50d340bff0636f144f6c65afbe755cdd 100644 (file)
@@ -21,20 +21,20 @@ struct rxrpc_txbuf *rxrpc_alloc_data_txbuf(struct rxrpc_call *call, size_t data_
 {
        struct rxrpc_wire_header *whdr;
        struct rxrpc_txbuf *txb;
-       size_t total, hoff = 0;
+       size_t total, hoff;
        void *buf;
 
        txb = kmalloc(sizeof(*txb), gfp);
        if (!txb)
                return NULL;
 
-       if (data_align)
-               hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
+       hoff = round_up(sizeof(*whdr), data_align) - sizeof(*whdr);
        total = hoff + sizeof(*whdr) + data_size;
 
+       data_align = umax(data_align, L1_CACHE_BYTES);
        mutex_lock(&call->conn->tx_data_alloc_lock);
-       buf = __page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
-                                     ~(data_align - 1) & ~(L1_CACHE_BYTES - 1));
+       buf = page_frag_alloc_align(&call->conn->tx_data_alloc, total, gfp,
+                                   data_align);
        mutex_unlock(&call->conn->tx_data_alloc_lock);
        if (!buf) {
                kfree(txb);
index bb9b747d58a1afac3619b80cff3e8196e36d96c3..ce18716491c8fed0cbe234f2044e3539c3bac6a3 100644 (file)
@@ -2664,6 +2664,7 @@ static void xs_tcp_tls_setup_socket(struct work_struct *work)
                .xprtsec        = {
                        .policy         = RPC_XPRTSEC_NONE,
                },
+               .stats          = upper_clnt->cl_stats,
        };
        unsigned int pflags = current->flags;
        struct rpc_clnt *lower_clnt;
index 5c9fd4791c4ba1976f1d3d7fcb9ad458df1436f7..76284fc538ebdd38d52c879bd4fc58c84abc6371 100644 (file)
@@ -142,9 +142,9 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        if (fragid == FIRST_FRAGMENT) {
                if (unlikely(head))
                        goto err;
-               *buf = NULL;
                if (skb_has_frag_list(frag) && __skb_linearize(frag))
                        goto err;
+               *buf = NULL;
                frag = skb_unshare(frag, GFP_ATOMIC);
                if (unlikely(!frag))
                        goto err;
@@ -156,6 +156,11 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        if (!head)
                goto err;
 
+       /* Either the input skb ownership is transferred to headskb
+        * or the input skb is freed, clear the reference to avoid
+        * bad access on error path.
+        */
+       *buf = NULL;
        if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
                kfree_skb_partial(frag, headstolen);
        } else {
@@ -179,7 +184,6 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
                *headbuf = NULL;
                return 1;
        }
-       *buf = NULL;
        return 0;
 err:
        kfree_skb(*buf);
index 6a384b922e4fada5bfed7fa8b7e0f95ea7e3457a..d1f6cdcf1866e4162286b32072f7a21dc93ea94f 100644 (file)
@@ -557,9 +557,32 @@ static const struct config_entry *snd_intel_dsp_find_config
                if (table->codec_hid) {
                        int i;
 
-                       for (i = 0; i < table->codec_hid->num_codecs; i++)
-                               if (acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
+                       for (i = 0; i < table->codec_hid->num_codecs; i++) {
+                               struct nhlt_acpi_table *nhlt;
+                               bool ssp_found = false;
+
+                               if (!acpi_dev_present(table->codec_hid->codecs[i], NULL, -1))
+                                       continue;
+
+                               nhlt = intel_nhlt_init(&pci->dev);
+                               if (!nhlt) {
+                                       dev_warn(&pci->dev, "%s: NHLT table not found, skipped HID %s\n",
+                                                __func__, table->codec_hid->codecs[i]);
+                                       continue;
+                               }
+
+                               if (intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_SSP) &&
+                                   intel_nhlt_ssp_endpoint_mask(nhlt, NHLT_DEVICE_I2S))
+                                       ssp_found = true;
+
+                               intel_nhlt_free(nhlt);
+
+                               if (ssp_found)
                                        break;
+
+                               dev_warn(&pci->dev, "%s: no valid SSP found for HID %s, skipped\n",
+                                        __func__, table->codec_hid->codecs[i]);
+                       }
                        if (i == table->codec_hid->num_codecs)
                                continue;
                }
index 5f60658c6051c90bf06b42c8d39a53e3dd54c1f1..d7417a40392b2dc02647ff3ffb0802cfada6fa35 100644 (file)
@@ -45,6 +45,8 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, u8 idx)
                                 "intel-quirk-mask",
                                 &quirk_mask);
 
+       fwnode_handle_put(link);
+
        if (quirk_mask & SDW_INTEL_QUIRK_MASK_BUS_DISABLE)
                return false;
 
index fe72e7d7724127418b0b5351e64f6dc0d8b2c831..dadeda7758ceebce94d6205d38b0807b1e6c5587 100644 (file)
@@ -189,8 +189,7 @@ static int snd_emu10k1_suspend(struct device *dev)
 
        emu->suspend = 1;
 
-       cancel_work_sync(&emu->emu1010.firmware_work);
-       cancel_work_sync(&emu->emu1010.clock_work);
+       cancel_work_sync(&emu->emu1010.work);
 
        snd_ac97_suspend(emu->ac97);
 
index de5c41e578e1f4ac3cf085f1c8b20c75a545fce8..8ccc0178360ce8c0a5d70b920d212ca299310fd6 100644 (file)
@@ -732,69 +732,67 @@ static int snd_emu1010_load_firmware(struct snd_emu10k1 *emu, int dock,
        return snd_emu1010_load_firmware_entry(emu, *fw);
 }
 
-static void emu1010_firmware_work(struct work_struct *work)
+static void snd_emu1010_load_dock_firmware(struct snd_emu10k1 *emu)
 {
-       struct snd_emu10k1 *emu;
-       u32 tmp, tmp2, reg;
+       u32 tmp, tmp2;
        int err;
 
-       emu = container_of(work, struct snd_emu10k1,
-                          emu1010.firmware_work);
-       if (emu->card->shutdown)
+       // The docking events clearly arrive prematurely - while the
+       // Dock's FPGA seems to be successfully programmed, the Dock
+       // fails to initialize subsequently if we don't give it some
+       // time to "warm up" here.
+       msleep(200);
+
+       dev_info(emu->card->dev, "emu1010: Loading Audio Dock Firmware\n");
+       /* Return to Audio Dock programming mode */
+       snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
+                              EMU_HANA_FPGA_CONFIG_AUDIODOCK);
+       err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw);
+       if (err < 0)
                return;
-#ifdef CONFIG_PM_SLEEP
-       if (emu->suspend)
+       snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
+
+       snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp);
+       dev_dbg(emu->card->dev, "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp);
+       if ((tmp & 0x1f) != 0x15) {
+               /* FPGA failed to be programmed */
+               dev_err(emu->card->dev,
+                       "emu1010: Loading Audio Dock Firmware failed, reg = 0x%x\n",
+                       tmp);
                return;
-#endif
+       }
+       dev_info(emu->card->dev, "emu1010: Audio Dock Firmware loaded\n");
+
+       snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
+       snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
+       dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2);
+
+       /* Allow DLL to settle, to sync clocking between 1010 and Dock */
+       msleep(10);
+}
+
+static void emu1010_dock_event(struct snd_emu10k1 *emu)
+{
+       u32 reg;
+
        snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg); /* OPTIONS: Which cards are attached to the EMU */
        if (reg & EMU_HANA_OPTION_DOCK_OFFLINE) {
                /* Audio Dock attached */
-               /* Return to Audio Dock programming mode */
-               dev_info(emu->card->dev,
-                        "emu1010: Loading Audio Dock Firmware\n");
-               snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG,
-                                      EMU_HANA_FPGA_CONFIG_AUDIODOCK);
-               err = snd_emu1010_load_firmware(emu, 1, &emu->dock_fw);
-               if (err < 0)
-                       return;
-               snd_emu1010_fpga_write(emu, EMU_HANA_FPGA_CONFIG, 0);
-               snd_emu1010_fpga_read(emu, EMU_HANA_ID, &tmp);
-               dev_info(emu->card->dev,
-                        "emu1010: EMU_HANA+DOCK_ID = 0x%x\n", tmp);
-               if ((tmp & 0x1f) != 0x15) {
-                       /* FPGA failed to be programmed */
-                       dev_info(emu->card->dev,
-                                "emu1010: Loading Audio Dock Firmware file failed, reg = 0x%x\n",
-                                tmp);
-                       return;
-               }
-               dev_info(emu->card->dev,
-                        "emu1010: Audio Dock Firmware loaded\n");
-               snd_emu1010_fpga_read(emu, EMU_DOCK_MAJOR_REV, &tmp);
-               snd_emu1010_fpga_read(emu, EMU_DOCK_MINOR_REV, &tmp2);
-               dev_info(emu->card->dev, "Audio Dock ver: %u.%u\n", tmp, tmp2);
-               /* Sync clocking between 1010 and Dock */
-               /* Allow DLL to settle */
-               msleep(10);
+               snd_emu1010_load_dock_firmware(emu);
                /* Unmute all. Default is muted after a firmware load */
                snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
+       } else if (!(reg & EMU_HANA_OPTION_DOCK_ONLINE)) {
+               /* Audio Dock removed */
+               dev_info(emu->card->dev, "emu1010: Audio Dock detached\n");
+               /* The hardware auto-mutes all, so we unmute again */
+               snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
        }
 }
 
-static void emu1010_clock_work(struct work_struct *work)
+static void emu1010_clock_event(struct snd_emu10k1 *emu)
 {
-       struct snd_emu10k1 *emu;
        struct snd_ctl_elem_id id;
 
-       emu = container_of(work, struct snd_emu10k1,
-                          emu1010.clock_work);
-       if (emu->card->shutdown)
-               return;
-#ifdef CONFIG_PM_SLEEP
-       if (emu->suspend)
-               return;
-#endif
-
        spin_lock_irq(&emu->reg_lock);
        // This is the only thing that can actually happen.
        emu->emu1010.clock_source = emu->emu1010.clock_fallback;
@@ -805,21 +803,44 @@ static void emu1010_clock_work(struct work_struct *work)
        snd_ctl_notify(emu->card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
 }
 
-static void emu1010_interrupt(struct snd_emu10k1 *emu)
+static void emu1010_work(struct work_struct *work)
 {
+       struct snd_emu10k1 *emu;
        u32 sts;
 
+       emu = container_of(work, struct snd_emu10k1, emu1010.work);
+       if (emu->card->shutdown)
+               return;
+#ifdef CONFIG_PM_SLEEP
+       if (emu->suspend)
+               return;
+#endif
+
+       snd_emu1010_fpga_lock(emu);
+
        snd_emu1010_fpga_read(emu, EMU_HANA_IRQ_STATUS, &sts);
-       if (sts & EMU_HANA_IRQ_DOCK_LOST) {
-               /* Audio Dock removed */
-               dev_info(emu->card->dev, "emu1010: Audio Dock detached\n");
-               /* The hardware auto-mutes all, so we unmute again */
-               snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
-       } else if (sts & EMU_HANA_IRQ_DOCK) {
-               schedule_work(&emu->emu1010.firmware_work);
-       }
+
+       // The distinction of the IRQ status bits is unreliable,
+       // so we dispatch later based on option card status.
+       if (sts & (EMU_HANA_IRQ_DOCK | EMU_HANA_IRQ_DOCK_LOST))
+               emu1010_dock_event(emu);
+
        if (sts & EMU_HANA_IRQ_WCLK_CHANGED)
-               schedule_work(&emu->emu1010.clock_work);
+               emu1010_clock_event(emu);
+
+       snd_emu1010_fpga_unlock(emu);
+}
+
+static void emu1010_interrupt(struct snd_emu10k1 *emu)
+{
+       // We get an interrupt on each GPIO input pin change, but we
+       // care only about the ones triggered by the dedicated pin.
+       u16 sts = inw(emu->port + A_GPIO);
+       u16 bit = emu->card_capabilities->ca0108_chip ? 0x2000 : 0x8000;
+       if (!(sts & bit))
+               return;
+
+       schedule_work(&emu->emu1010.work);
 }
 
 /*
@@ -841,6 +862,8 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
         * Proper init follows in snd_emu10k1_init(). */
        outl(HCFG_LOCKSOUNDCACHE | HCFG_LOCKTANKCACHE_MASK, emu->port + HCFG);
 
+       snd_emu1010_fpga_lock(emu);
+
        /* Disable 48Volt power to Audio Dock */
        snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
 
@@ -866,7 +889,7 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
        err = snd_emu1010_load_firmware(emu, 0, &emu->firmware);
        if (err < 0) {
                dev_info(emu->card->dev, "emu1010: Loading Firmware failed\n");
-               return err;
+               goto fail;
        }
 
        /* ID, should read & 0x7f = 0x55 when FPGA programmed. */
@@ -876,7 +899,8 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
                dev_info(emu->card->dev,
                         "emu1010: Loading Hana Firmware file failed, reg = 0x%x\n",
                         reg);
-               return -ENODEV;
+               err = -ENODEV;
+               goto fail;
        }
 
        dev_info(emu->card->dev, "emu1010: Hana Firmware loaded\n");
@@ -889,7 +913,7 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
        snd_emu1010_fpga_read(emu, EMU_HANA_OPTION_CARDS, &reg);
        dev_info(emu->card->dev, "emu1010: Card options = 0x%x\n", reg);
        if (reg & EMU_HANA_OPTION_DOCK_OFFLINE)
-               schedule_work(&emu->emu1010.firmware_work);
+               snd_emu1010_load_dock_firmware(emu);
        if (emu->card_capabilities->no_adat) {
                emu->emu1010.optical_in = 0; /* IN_SPDIF */
                emu->emu1010.optical_out = 0; /* OUT_SPDIF */
@@ -936,7 +960,9 @@ static int snd_emu10k1_emu1010_init(struct snd_emu10k1 *emu)
        // so it is safe to simply enable the outputs.
        snd_emu1010_fpga_write(emu, EMU_HANA_UNMUTE, EMU_UNMUTE);
 
-       return 0;
+fail:
+       snd_emu1010_fpga_unlock(emu);
+       return err;
 }
 /*
  *  Create the EMU10K1 instance
@@ -958,10 +984,10 @@ static void snd_emu10k1_free(struct snd_card *card)
        }
        if (emu->card_capabilities->emu_model == EMU_MODEL_EMU1010) {
                /* Disable 48Volt power to Audio Dock */
-               snd_emu1010_fpga_write(emu, EMU_HANA_DOCK_PWR, 0);
+               snd_emu1010_fpga_write_lock(emu, EMU_HANA_DOCK_PWR, 0);
        }
-       cancel_work_sync(&emu->emu1010.firmware_work);
-       cancel_work_sync(&emu->emu1010.clock_work);
+       cancel_work_sync(&emu->emu1010.work);
+       mutex_destroy(&emu->emu1010.lock);
        release_firmware(emu->firmware);
        release_firmware(emu->dock_fw);
        snd_util_memhdr_free(emu->memhdr);
@@ -1540,8 +1566,8 @@ int snd_emu10k1_create(struct snd_card *card,
        emu->irq = -1;
        emu->synth = NULL;
        emu->get_synth_voice = NULL;
-       INIT_WORK(&emu->emu1010.firmware_work, emu1010_firmware_work);
-       INIT_WORK(&emu->emu1010.clock_work, emu1010_clock_work);
+       INIT_WORK(&emu->emu1010.work, emu1010_work);
+       mutex_init(&emu->emu1010.lock);
        /* read revision & serial */
        emu->revision = pci->revision;
        pci_read_config_dword(pci, PCI_SUBSYSTEM_VENDOR_ID, &emu->serial);
index 0a32ea53d8c648a78f44227579a76c5e111eda89..05b98d9b547b2a9ffd063649311887693b1b20de 100644 (file)
@@ -661,7 +661,9 @@ static int snd_emu1010_output_source_put(struct snd_kcontrol *kcontrol,
        change = (emu->emu1010.output_source[channel] != val);
        if (change) {
                emu->emu1010.output_source[channel] = val;
+               snd_emu1010_fpga_lock(emu);
                snd_emu1010_output_source_apply(emu, channel, val);
+               snd_emu1010_fpga_unlock(emu);
        }
        return change;
 }
@@ -705,7 +707,9 @@ static int snd_emu1010_input_source_put(struct snd_kcontrol *kcontrol,
        change = (emu->emu1010.input_source[channel] != val);
        if (change) {
                emu->emu1010.input_source[channel] = val;
+               snd_emu1010_fpga_lock(emu);
                snd_emu1010_input_source_apply(emu, channel, val);
+               snd_emu1010_fpga_unlock(emu);
        }
        return change;
 }
@@ -774,7 +778,7 @@ static int snd_emu1010_adc_pads_put(struct snd_kcontrol *kcontrol, struct snd_ct
                cache = cache & ~mask;
        change = (cache != emu->emu1010.adc_pads);
        if (change) {
-               snd_emu1010_fpga_write(emu, EMU_HANA_ADC_PADS, cache );
+               snd_emu1010_fpga_write_lock(emu, EMU_HANA_ADC_PADS, cache );
                emu->emu1010.adc_pads = cache;
        }
 
@@ -832,7 +836,7 @@ static int snd_emu1010_dac_pads_put(struct snd_kcontrol *kcontrol, struct snd_ct
                cache = cache & ~mask;
        change = (cache != emu->emu1010.dac_pads);
        if (change) {
-               snd_emu1010_fpga_write(emu, EMU_HANA_DAC_PADS, cache );
+               snd_emu1010_fpga_write_lock(emu, EMU_HANA_DAC_PADS, cache );
                emu->emu1010.dac_pads = cache;
        }
 
@@ -980,6 +984,7 @@ static int snd_emu1010_clock_source_put(struct snd_kcontrol *kcontrol,
        val = ucontrol->value.enumerated.item[0] ;
        if (val >= emu_ci->num)
                return -EINVAL;
+       snd_emu1010_fpga_lock(emu);
        spin_lock_irq(&emu->reg_lock);
        change = (emu->emu1010.clock_source != val);
        if (change) {
@@ -996,6 +1001,7 @@ static int snd_emu1010_clock_source_put(struct snd_kcontrol *kcontrol,
        } else {
                spin_unlock_irq(&emu->reg_lock);
        }
+       snd_emu1010_fpga_unlock(emu);
        return change;
 }
 
@@ -1041,7 +1047,7 @@ static int snd_emu1010_clock_fallback_put(struct snd_kcontrol *kcontrol,
        change = (emu->emu1010.clock_fallback != val);
        if (change) {
                emu->emu1010.clock_fallback = val;
-               snd_emu1010_fpga_write(emu, EMU_HANA_DEFCLOCK, 1 - val);
+               snd_emu1010_fpga_write_lock(emu, EMU_HANA_DEFCLOCK, 1 - val);
        }
        return change;
 }
@@ -1093,7 +1099,7 @@ static int snd_emu1010_optical_out_put(struct snd_kcontrol *kcontrol,
                emu->emu1010.optical_out = val;
                tmp = (emu->emu1010.optical_in ? EMU_HANA_OPTICAL_IN_ADAT : EMU_HANA_OPTICAL_IN_SPDIF) |
                        (emu->emu1010.optical_out ? EMU_HANA_OPTICAL_OUT_ADAT : EMU_HANA_OPTICAL_OUT_SPDIF);
-               snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, tmp);
+               snd_emu1010_fpga_write_lock(emu, EMU_HANA_OPTICAL_TYPE, tmp);
        }
        return change;
 }
@@ -1144,7 +1150,7 @@ static int snd_emu1010_optical_in_put(struct snd_kcontrol *kcontrol,
                emu->emu1010.optical_in = val;
                tmp = (emu->emu1010.optical_in ? EMU_HANA_OPTICAL_IN_ADAT : EMU_HANA_OPTICAL_IN_SPDIF) |
                        (emu->emu1010.optical_out ? EMU_HANA_OPTICAL_OUT_ADAT : EMU_HANA_OPTICAL_OUT_SPDIF);
-               snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, tmp);
+               snd_emu1010_fpga_write_lock(emu, EMU_HANA_OPTICAL_TYPE, tmp);
        }
        return change;
 }
@@ -2323,7 +2329,9 @@ int snd_emu10k1_mixer(struct snd_emu10k1 *emu,
                for (i = 0; i < emu_ri->n_outs; i++)
                        emu->emu1010.output_source[i] =
                                emu1010_map_source(emu_ri, emu_ri->out_dflts[i]);
+               snd_emu1010_fpga_lock(emu);
                snd_emu1010_apply_sources(emu);
+               snd_emu1010_fpga_unlock(emu);
 
                kctl = emu->ctl_clock_source = snd_ctl_new1(&snd_emu1010_clock_source, emu);
                err = snd_ctl_add(card, kctl);
index 2f80fd91017c3994e40017f9a9bb5c22ea21603a..737c28d31b41a39a4ca1ddb335df6b907c8e8a30 100644 (file)
@@ -165,6 +165,8 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
        u32 value2;
 
        if (emu->card_capabilities->emu_model) {
+               snd_emu1010_fpga_lock(emu);
+
                // This represents the S/PDIF lock status on 0404b, which is
                // kinda weird and unhelpful, because monitoring it via IRQ is
                // impractical (one gets an IRQ flood as long as it is desynced).
@@ -197,6 +199,8 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
                        snd_iprintf(buffer, "\nS/PDIF mode: %s%s\n",
                                    value & EMU_HANA_SPDIF_MODE_RX_PRO ? "professional" : "consumer",
                                    value & EMU_HANA_SPDIF_MODE_RX_NOCOPY ? ", no copy" : "");
+
+               snd_emu1010_fpga_unlock(emu);
        } else {
                snd_emu10k1_proc_spdif_status(emu, buffer, "CD-ROM S/PDIF In", CDCS, CDSRCS);
                snd_emu10k1_proc_spdif_status(emu, buffer, "Optical or Coax S/PDIF In", GPSCS, GPSRCS);
@@ -458,6 +462,9 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
        struct snd_emu10k1 *emu = entry->private_data;
        u32 value;
        int i;
+
+       snd_emu1010_fpga_lock(emu);
+
        snd_iprintf(buffer, "EMU1010 Registers:\n\n");
 
        for(i = 0; i < 0x40; i+=1) {
@@ -496,6 +503,8 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
                        snd_emu_proc_emu1010_link_read(emu, buffer, 0x701);
                }
        }
+
+       snd_emu1010_fpga_unlock(emu);
 }
 
 static void snd_emu_proc_io_reg_read(struct snd_info_entry *entry,
index 74df2330015f66ef3930d05884092d74e4b100af..f4a1c2d4b0787518634dac52ebae9c41a01f05dc 100644 (file)
@@ -285,24 +285,33 @@ static void snd_emu1010_fpga_write_locked(struct snd_emu10k1 *emu, u32 reg, u32
        outw(value, emu->port + A_GPIO);
        udelay(10);
        outw(value | 0x80 , emu->port + A_GPIO);  /* High bit clocks the value into the fpga. */
+       udelay(10);
 }
 
 void snd_emu1010_fpga_write(struct snd_emu10k1 *emu, u32 reg, u32 value)
 {
-       unsigned long flags;
+       if (snd_BUG_ON(!mutex_is_locked(&emu->emu1010.lock)))
+               return;
+       snd_emu1010_fpga_write_locked(emu, reg, value);
+}
 
-       spin_lock_irqsave(&emu->emu_lock, flags);
+void snd_emu1010_fpga_write_lock(struct snd_emu10k1 *emu, u32 reg, u32 value)
+{
+       snd_emu1010_fpga_lock(emu);
        snd_emu1010_fpga_write_locked(emu, reg, value);
-       spin_unlock_irqrestore(&emu->emu_lock, flags);
+       snd_emu1010_fpga_unlock(emu);
 }
 
-static void snd_emu1010_fpga_read_locked(struct snd_emu10k1 *emu, u32 reg, u32 *value)
+void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value)
 {
        // The highest input pin is used as the designated interrupt trigger,
        // so it needs to be masked out.
        // But note that any other input pin change will also cause an IRQ,
        // so using this function often causes an IRQ as a side effect.
        u32 mask = emu->card_capabilities->ca0108_chip ? 0x1f : 0x7f;
+
+       if (snd_BUG_ON(!mutex_is_locked(&emu->emu1010.lock)))
+               return;
        if (snd_BUG_ON(reg > 0x3f))
                return;
        reg += 0x40; /* 0x40 upwards are registers. */
@@ -313,47 +322,31 @@ static void snd_emu1010_fpga_read_locked(struct snd_emu10k1 *emu, u32 reg, u32 *
        *value = ((inw(emu->port + A_GPIO) >> 8) & mask);
 }
 
-void snd_emu1010_fpga_read(struct snd_emu10k1 *emu, u32 reg, u32 *value)
-{
-       unsigned long flags;
-
-       spin_lock_irqsave(&emu->emu_lock, flags);
-       snd_emu1010_fpga_read_locked(emu, reg, value);
-       spin_unlock_irqrestore(&emu->emu_lock, flags);
-}
-
 /* Each Destination has one and only one Source,
  * but one Source can feed any number of Destinations simultaneously.
  */
 void snd_emu1010_fpga_link_dst_src_write(struct snd_emu10k1 *emu, u32 dst, u32 src)
 {
-       unsigned long flags;
-
        if (snd_BUG_ON(dst & ~0x71f))
                return;
        if (snd_BUG_ON(src & ~0x71f))
                return;
-       spin_lock_irqsave(&emu->emu_lock, flags);
-       snd_emu1010_fpga_write_locked(emu, EMU_HANA_DESTHI, dst >> 8);
-       snd_emu1010_fpga_write_locked(emu, EMU_HANA_DESTLO, dst & 0x1f);
-       snd_emu1010_fpga_write_locked(emu, EMU_HANA_SRCHI, src >> 8);
-       snd_emu1010_fpga_write_locked(emu, EMU_HANA_SRCLO, src & 0x1f);
-       spin_unlock_irqrestore(&emu->emu_lock, flags);
+       snd_emu1010_fpga_write(emu, EMU_HANA_DESTHI, dst >> 8);
+       snd_emu1010_fpga_write(emu, EMU_HANA_DESTLO, dst & 0x1f);
+       snd_emu1010_fpga_write(emu, EMU_HANA_SRCHI, src >> 8);
+       snd_emu1010_fpga_write(emu, EMU_HANA_SRCLO, src & 0x1f);
 }
 
 u32 snd_emu1010_fpga_link_dst_src_read(struct snd_emu10k1 *emu, u32 dst)
 {
-       unsigned long flags;
        u32 hi, lo;
 
        if (snd_BUG_ON(dst & ~0x71f))
                return 0;
-       spin_lock_irqsave(&emu->emu_lock, flags);
-       snd_emu1010_fpga_write_locked(emu, EMU_HANA_DESTHI, dst >> 8);
-       snd_emu1010_fpga_write_locked(emu, EMU_HANA_DESTLO, dst & 0x1f);
-       snd_emu1010_fpga_read_locked(emu, EMU_HANA_SRCHI, &hi);
-       snd_emu1010_fpga_read_locked(emu, EMU_HANA_SRCLO, &lo);
-       spin_unlock_irqrestore(&emu->emu_lock, flags);
+       snd_emu1010_fpga_write(emu, EMU_HANA_DESTHI, dst >> 8);
+       snd_emu1010_fpga_write(emu, EMU_HANA_DESTLO, dst & 0x1f);
+       snd_emu1010_fpga_read(emu, EMU_HANA_SRCHI, &hi);
+       snd_emu1010_fpga_read(emu, EMU_HANA_SRCLO, &lo);
        return (hi << 8) | lo;
 }
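
For reference, the locking model these emu10k1 hunks establish: the per-call emu_lock spinlock is gone, and callers now hold the new emu->emu1010.lock mutex across a whole FPGA transaction via snd_emu1010_fpga_lock()/snd_emu1010_fpga_unlock() (or the one-shot snd_emu1010_fpga_write_lock()), while snd_emu1010_fpga_write()/snd_emu1010_fpga_read() merely assert that the lock is held. A minimal sketch of a composite caller; the function name is hypothetical, the helpers and register names are taken from the patch:

    #include <sound/emu10k1.h>

    /* Hold the FPGA mutex once across the whole read-modify-write so no
     * other path can interleave GPIO accesses between the two steps. */
    static void example_update_optical_mode(struct snd_emu10k1 *emu,
                                            bool in_adat, bool out_adat)
    {
            u32 want = (in_adat ? EMU_HANA_OPTICAL_IN_ADAT : EMU_HANA_OPTICAL_IN_SPDIF) |
                       (out_adat ? EMU_HANA_OPTICAL_OUT_ADAT : EMU_HANA_OPTICAL_OUT_SPDIF);
            u32 cur;

            snd_emu1010_fpga_lock(emu);             /* takes emu->emu1010.lock */
            snd_emu1010_fpga_read(emu, EMU_HANA_OPTICAL_TYPE, &cur);
            if (cur != want)
                    snd_emu1010_fpga_write(emu, EMU_HANA_OPTICAL_TYPE, want);
            snd_emu1010_fpga_unlock(emu);
    }
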
 
index 1a3f84599cb584331f9766ca6bd5b00bacaa580d..558c1f38fe971859d809e25d774069e73cc642b1 100644 (file)
@@ -644,6 +644,8 @@ static int cs35l56_hda_fw_load(struct cs35l56_hda *cs35l56)
                ret = cs35l56_wait_for_firmware_boot(&cs35l56->base);
                if (ret)
                        goto err_powered_up;
+
+               regcache_cache_only(cs35l56->base.regmap, false);
        }
 
        /* Disable auto-hibernate so that runtime_pm has control */
@@ -1002,6 +1004,8 @@ int cs35l56_hda_common_probe(struct cs35l56_hda *cs35l56, int hid, int id)
        if (ret)
                goto err;
 
+       regcache_cache_only(cs35l56->base.regmap, false);
+
        ret = cs35l56_set_patch(&cs35l56->base);
        if (ret)
                goto err;
index 70d80b6af3fe370aa9c6f9fd471676ba9c18da72..b29739bd330b1331848553d1f2854694a1be369a 100644 (file)
@@ -920,6 +920,8 @@ static void alc_pre_init(struct hda_codec *codec)
        ((codec)->core.dev.power.power_state.event == PM_EVENT_RESUME)
 #define is_s4_resume(codec) \
        ((codec)->core.dev.power.power_state.event == PM_EVENT_RESTORE)
+#define is_s4_suspend(codec) \
+       ((codec)->core.dev.power.power_state.event == PM_EVENT_FREEZE)
 
 static int alc_init(struct hda_codec *codec)
 {
@@ -7183,6 +7185,46 @@ static void alc245_fixup_hp_spectre_x360_eu0xxx(struct hda_codec *codec,
        alc245_fixup_hp_gpio_led(codec, fix, action);
 }
 
+/*
+ * ALC287 PCM hooks
+ */
+static void alc287_alc1318_playback_pcm_hook(struct hda_pcm_stream *hinfo,
+                                  struct hda_codec *codec,
+                                  struct snd_pcm_substream *substream,
+                                  int action)
+{
+       alc_write_coef_idx(codec, 0x10, 0x8806); /* Change MLK to GPIO3 */
+       switch (action) {
+       case HDA_GEN_PCM_ACT_OPEN:
+               alc_write_coefex_idx(codec, 0x5a, 0x00, 0x954f); /* write gpio3 to high */
+               break;
+       case HDA_GEN_PCM_ACT_CLOSE:
+               alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */
+               break;
+       }
+}
+
+static void __maybe_unused alc287_s4_power_gpio3_default(struct hda_codec *codec)
+{
+       if (is_s4_suspend(codec)) {
+               alc_write_coef_idx(codec, 0x10, 0x8806); /* Change MLK to GPIO3 */
+               alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */
+       }
+}
+
+static void alc287_fixup_lenovo_thinkpad_with_alc1318(struct hda_codec *codec,
+                              const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+#ifdef CONFIG_PM
+       spec->power_hook = alc287_s4_power_gpio3_default;
+#endif
+       spec->gen.pcm_playback_hook = alc287_alc1318_playback_pcm_hook;
+}
+
 
 enum {
        ALC269_FIXUP_GPIO2,
@@ -7426,6 +7468,7 @@ enum {
        ALC287_FIXUP_YOGA7_14ITL_SPEAKERS,
        ALC298_FIXUP_LENOVO_C940_DUET7,
        ALC287_FIXUP_LENOVO_14IRP8_DUETITL,
+       ALC287_FIXUP_LENOVO_LEGION_7,
        ALC287_FIXUP_13S_GEN2_SPEAKERS,
        ALC256_FIXUP_SET_COEF_DEFAULTS,
        ALC256_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
@@ -7470,7 +7513,8 @@ enum {
        ALC285_FIXUP_ASUS_GA403U_HEADSET_MIC,
        ALC285_FIXUP_ASUS_GA403U_I2C_SPEAKER2_TO_DAC1,
        ALC285_FIXUP_ASUS_GU605_SPI_2_HEADSET_MIC,
-       ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1
+       ALC285_FIXUP_ASUS_GU605_SPI_SPEAKER2_TO_DAC1,
+       ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318,
 };
 
 /* A special fixup for Lenovo C940 and Yoga Duet 7;
@@ -7510,6 +7554,23 @@ static void alc287_fixup_lenovo_14irp8_duetitl(struct hda_codec *codec,
        __snd_hda_apply_fixup(codec, id, action, 0);
 }
 
+/* Another hilarious PCI SSID conflict with Lenovo Legion Pro 7 16ARX8H (with
+ * TAS2781 codec) and Legion 7i 16IAX7 (with CS35L41 codec);
+ * we apply a corresponding fixup depending on the codec SSID instead
+ */
+static void alc287_fixup_lenovo_legion_7(struct hda_codec *codec,
+                                        const struct hda_fixup *fix,
+                                        int action)
+{
+       int id;
+
+       if (codec->core.subsystem_id == 0x17aa38a8)
+               id = ALC287_FIXUP_TAS2781_I2C; /* Legion Pro 7 16ARX8H */
+       else
+               id = ALC287_FIXUP_CS35L41_I2C_2; /* Legion 7i 16IAX7 */
+       __snd_hda_apply_fixup(codec, id, action, 0);
+}
+
 static const struct hda_fixup alc269_fixups[] = {
        [ALC269_FIXUP_GPIO2] = {
                .type = HDA_FIXUP_FUNC,
@@ -9404,6 +9465,10 @@ static const struct hda_fixup alc269_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = alc287_fixup_lenovo_14irp8_duetitl,
        },
+       [ALC287_FIXUP_LENOVO_LEGION_7] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc287_fixup_lenovo_legion_7,
+       },
        [ALC287_FIXUP_13S_GEN2_SPEAKERS] = {
                .type = HDA_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -9726,6 +9791,12 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC285_FIXUP_ASUS_GA403U,
        },
+       [ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc287_fixup_lenovo_thinkpad_with_alc1318,
+               .chained = true,
+               .chain_id = ALC269_FIXUP_THINKPAD_ACPI
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -9937,6 +10008,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x860f, "HP ZBook 15 G6", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x861f, "HP Elite Dragonfly G1", ALC285_FIXUP_HP_GPIO_AMP_INIT),
        SND_PCI_QUIRK(0x103c, 0x869d, "HP", ALC236_FIXUP_HP_MUTE_LED),
+       SND_PCI_QUIRK(0x103c, 0x86c1, "HP Laptop 15-da3001TU", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
        SND_PCI_QUIRK(0x103c, 0x86c7, "HP Envy AiO 32", ALC274_FIXUP_HP_ENVY_GPIO),
        SND_PCI_QUIRK(0x103c, 0x86e7, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
        SND_PCI_QUIRK(0x103c, 0x86e8, "HP Spectre x360 15-eb0xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1),
@@ -10393,6 +10465,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2318, "Thinkpad Z13 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x2319, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x231a, "Thinkpad Z16 Gen2", ALC287_FIXUP_MG_RTKC_CSAMP_CS35L41_I2C_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x231e, "Thinkpad", ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318),
+       SND_PCI_QUIRK(0x17aa, 0x231f, "Thinkpad", ALC287_FIXUP_LENOVO_THKPAD_WH_ALC1318),
        SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
        SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
@@ -10422,7 +10496,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x3853, "Lenovo Yoga 7 15ITL5", ALC287_FIXUP_YOGA7_14ITL_SPEAKERS),
        SND_PCI_QUIRK(0x17aa, 0x3855, "Legion 7 16ITHG6", ALC287_FIXUP_LEGION_16ITHG6),
        SND_PCI_QUIRK(0x17aa, 0x3869, "Lenovo Yoga7 14IAL7", ALC287_FIXUP_YOGA9_14IAP7_BASS_SPK_PIN),
-       SND_PCI_QUIRK(0x17aa, 0x386f, "Legion 7i 16IAX7", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x17aa, 0x386f, "Legion Pro 7/7i", ALC287_FIXUP_LENOVO_LEGION_7),
        SND_PCI_QUIRK(0x17aa, 0x3870, "Lenovo Yoga 7 14ARB7", ALC287_FIXUP_YOGA7_14ARB7_I2C),
        SND_PCI_QUIRK(0x17aa, 0x3877, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
        SND_PCI_QUIRK(0x17aa, 0x3878, "Lenovo Legion 7 Slim 16ARHA7", ALC287_FIXUP_CS35L41_I2C_2),
index 69c68d8e7a6b54fc1fcfc3fac3a73d58403071f5..1760b5d42460afe83f3d50dbcdd73daf9455fc12 100644 (file)
@@ -430,6 +430,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "MRID6"),
                }
        },
+       {
+               .driver_data = &acp6x_card,
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "MDC"),
+                       DMI_MATCH(DMI_BOARD_NAME, "Herbag_MDU"),
+               }
+       },
        {
                .driver_data = &acp6x_card,
                .matches = {
index dfb4ce53491bbaa8800b8e7b3e2737a0a1be47bc..f8e57a2fc3e324fcf4cdefde12b3a8db24239af5 100644 (file)
@@ -1094,6 +1094,7 @@ static int cs35l41_handle_pdata(struct device *dev, struct cs35l41_hw_cfg *hw_cf
 static int cs35l41_dsp_init(struct cs35l41_private *cs35l41)
 {
        struct wm_adsp *dsp;
+       uint32_t dsp1rx5_src;
        int ret;
 
        dsp = &cs35l41->dsp;
@@ -1113,16 +1114,29 @@ static int cs35l41_dsp_init(struct cs35l41_private *cs35l41)
                return ret;
        }
 
-       ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX5_SRC,
-                          CS35L41_INPUT_SRC_VPMON);
+       switch (cs35l41->hw_cfg.bst_type) {
+       case CS35L41_INT_BOOST:
+       case CS35L41_SHD_BOOST_ACTV:
+               dsp1rx5_src = CS35L41_INPUT_SRC_VPMON;
+               break;
+       case CS35L41_EXT_BOOST:
+       case CS35L41_SHD_BOOST_PASS:
+               dsp1rx5_src = CS35L41_INPUT_SRC_VBSTMON;
+               break;
+       default:
+               dev_err(cs35l41->dev, "wm_halo_init failed - Invalid Boost Type: %d\n",
+                       cs35l41->hw_cfg.bst_type);
+               goto err_dsp;
+       }
+
+       ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX5_SRC, dsp1rx5_src);
        if (ret < 0) {
-               dev_err(cs35l41->dev, "Write INPUT_SRC_VPMON failed: %d\n", ret);
+               dev_err(cs35l41->dev, "Write DSP1RX5_SRC: %d failed: %d\n", dsp1rx5_src, ret);
                goto err_dsp;
        }
-       ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX6_SRC,
-                          CS35L41_INPUT_SRC_CLASSH);
+       ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX6_SRC, CS35L41_INPUT_SRC_VBSTMON);
        if (ret < 0) {
-               dev_err(cs35l41->dev, "Write INPUT_SRC_CLASSH failed: %d\n", ret);
+               dev_err(cs35l41->dev, "Write CS35L41_INPUT_SRC_VBSTMON failed: %d\n", ret);
                goto err_dsp;
        }
        ret = regmap_write(cs35l41->regmap, CS35L41_DSP1_RX7_SRC,
index 14a5f86019aad47390ee8f3962a596296dfbf437..70ff55c1517fe02d2a75e7d683e58edcb01947d0 100644 (file)
@@ -188,8 +188,6 @@ static void cs35l56_sdw_init(struct sdw_slave *peripheral)
                        goto out;
        }
 
-       regcache_cache_only(cs35l56->base.regmap, false);
-
        ret = cs35l56_init(cs35l56);
        if (ret < 0) {
                regcache_cache_only(cs35l56->base.regmap, true);
index 08cac58e3ab2223c82b4da10314f04b2f055b5b6..fd02b621da52c0c68420f995764e752b6a789597 100644 (file)
@@ -40,16 +40,11 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_set_patch, SND_SOC_CS35L56_SHARED);
 static const struct reg_default cs35l56_reg_defaults[] = {
        /* no defaults for OTP_MEM - first read populates cache */
 
-       { CS35L56_ASP1_ENABLES1,                0x00000000 },
-       { CS35L56_ASP1_CONTROL1,                0x00000028 },
-       { CS35L56_ASP1_CONTROL2,                0x18180200 },
-       { CS35L56_ASP1_CONTROL3,                0x00000002 },
-       { CS35L56_ASP1_FRAME_CONTROL1,          0x03020100 },
-       { CS35L56_ASP1_FRAME_CONTROL5,          0x00020100 },
-       { CS35L56_ASP1_DATA_CONTROL1,           0x00000018 },
-       { CS35L56_ASP1_DATA_CONTROL5,           0x00000018 },
-
-       /* no defaults for ASP1TX mixer */
+       /*
+        * No defaults for ASP1 control or ASP1TX mixer. See
+        * cs35l56_populate_asp1_register_defaults() and
+        * cs35l56_sync_asp1_mixer_widgets_with_firmware().
+        */
 
        { CS35L56_SWIRE_DP3_CH1_INPUT,          0x00000018 },
        { CS35L56_SWIRE_DP3_CH2_INPUT,          0x00000019 },
@@ -210,6 +205,36 @@ static bool cs35l56_volatile_reg(struct device *dev, unsigned int reg)
        }
 }
 
+static const struct reg_sequence cs35l56_asp1_defaults[] = {
+       REG_SEQ0(CS35L56_ASP1_ENABLES1,         0x00000000),
+       REG_SEQ0(CS35L56_ASP1_CONTROL1,         0x00000028),
+       REG_SEQ0(CS35L56_ASP1_CONTROL2,         0x18180200),
+       REG_SEQ0(CS35L56_ASP1_CONTROL3,         0x00000002),
+       REG_SEQ0(CS35L56_ASP1_FRAME_CONTROL1,   0x03020100),
+       REG_SEQ0(CS35L56_ASP1_FRAME_CONTROL5,   0x00020100),
+       REG_SEQ0(CS35L56_ASP1_DATA_CONTROL1,    0x00000018),
+       REG_SEQ0(CS35L56_ASP1_DATA_CONTROL5,    0x00000018),
+};
+
+/*
+ * The firmware can have control of the ASP so we don't provide regmap
+ * with defaults for these registers, to prevent a regcache_sync() from
+ * overwriting the firmware settings. But if the machine driver hooks up
+ * the ASP it means the driver is taking control of the ASP, so then the
+ * registers are populated with the defaults.
+ */
+int cs35l56_init_asp1_regs_for_driver_control(struct cs35l56_base *cs35l56_base)
+{
+       if (!cs35l56_base->fw_owns_asp1)
+               return 0;
+
+       cs35l56_base->fw_owns_asp1 = false;
+
+       return regmap_multi_reg_write(cs35l56_base->regmap, cs35l56_asp1_defaults,
+                                     ARRAY_SIZE(cs35l56_asp1_defaults));
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_init_asp1_regs_for_driver_control, SND_SOC_CS35L56_SHARED);
+
 /*
  * The firmware boot sequence can overwrite the ASP1 config registers so that
  * they don't match regmap's view of their values. Rewrite the values from the
@@ -217,19 +242,15 @@ static bool cs35l56_volatile_reg(struct device *dev, unsigned int reg)
  */
 int cs35l56_force_sync_asp1_registers_from_cache(struct cs35l56_base *cs35l56_base)
 {
-       struct reg_sequence asp1_regs[] = {
-               { .reg = CS35L56_ASP1_ENABLES1 },
-               { .reg = CS35L56_ASP1_CONTROL1 },
-               { .reg = CS35L56_ASP1_CONTROL2 },
-               { .reg = CS35L56_ASP1_CONTROL3 },
-               { .reg = CS35L56_ASP1_FRAME_CONTROL1 },
-               { .reg = CS35L56_ASP1_FRAME_CONTROL5 },
-               { .reg = CS35L56_ASP1_DATA_CONTROL1 },
-               { .reg = CS35L56_ASP1_DATA_CONTROL5 },
-       };
+       struct reg_sequence asp1_regs[ARRAY_SIZE(cs35l56_asp1_defaults)];
        int i, ret;
 
-       /* Read values from regmap cache into a write sequence */
+       if (cs35l56_base->fw_owns_asp1)
+               return 0;
+
+       memcpy(asp1_regs, cs35l56_asp1_defaults, sizeof(asp1_regs));
+
+       /* Read current values from regmap cache into the write sequence */
        for (i = 0; i < ARRAY_SIZE(asp1_regs); ++i) {
                ret = regmap_read(cs35l56_base->regmap, asp1_regs[i].reg, &asp1_regs[i].def);
                if (ret)
@@ -307,10 +328,10 @@ int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base)
                reg = CS35L56_DSP1_HALO_STATE;
 
        /*
-        * This can't be a regmap_read_poll_timeout() because cs35l56 will NAK
-        * I2C until it has booted which would terminate the poll
+        * The regmap must remain in cache-only until the chip has
+        * booted, so use a bypassed read of the status register.
         */
-       poll_ret = read_poll_timeout(regmap_read, read_ret,
+       poll_ret = read_poll_timeout(regmap_read_bypassed, read_ret,
                                     (val < 0xFFFF) && (val >= CS35L56_HALO_STATE_BOOT_DONE),
                                     CS35L56_HALO_STATE_POLL_US,
                                     CS35L56_HALO_STATE_TIMEOUT_US,
@@ -362,7 +383,8 @@ void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire)
                return;
 
        cs35l56_wait_control_port_ready();
-       regcache_cache_only(cs35l56_base->regmap, false);
+
+       /* Leave in cache-only. This will be revoked when the chip has rebooted. */
 }
 EXPORT_SYMBOL_NS_GPL(cs35l56_system_reset, SND_SOC_CS35L56_SHARED);
 
@@ -577,14 +599,14 @@ int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_sou
                cs35l56_issue_wake_event(cs35l56_base);
 
 out_sync:
-       regcache_cache_only(cs35l56_base->regmap, false);
-
        ret = cs35l56_wait_for_firmware_boot(cs35l56_base);
        if (ret) {
                dev_err(cs35l56_base->dev, "Hibernate wake failed: %d\n", ret);
                goto err;
        }
 
+       regcache_cache_only(cs35l56_base->regmap, false);
+
        ret = cs35l56_mbox_send(cs35l56_base, CS35L56_MBOX_CMD_PREVENT_AUTO_HIBERNATE);
        if (ret)
                goto err;
@@ -684,7 +706,7 @@ EXPORT_SYMBOL_NS_GPL(cs35l56_calibration_controls, SND_SOC_CS35L56_SHARED);
 
 int cs35l56_get_calibration(struct cs35l56_base *cs35l56_base)
 {
-       u64 silicon_uid;
+       u64 silicon_uid = 0;
        int ret;
 
        /* Driver can't apply calibration to a secured part, so skip */
@@ -757,7 +779,7 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
         * devices so the REVID needs to be determined before waiting for the
         * firmware to boot.
         */
-       ret = regmap_read(cs35l56_base->regmap, CS35L56_REVID, &revid);
+       ret = regmap_read_bypassed(cs35l56_base->regmap, CS35L56_REVID, &revid);
        if (ret < 0) {
                dev_err(cs35l56_base->dev, "Get Revision ID failed\n");
                return ret;
@@ -768,7 +790,7 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
        if (ret)
                return ret;
 
-       ret = regmap_read(cs35l56_base->regmap, CS35L56_DEVID, &devid);
+       ret = regmap_read_bypassed(cs35l56_base->regmap, CS35L56_DEVID, &devid);
        if (ret < 0) {
                dev_err(cs35l56_base->dev, "Get Device ID failed\n");
                return ret;
@@ -787,6 +809,9 @@ int cs35l56_hw_init(struct cs35l56_base *cs35l56_base)
 
        cs35l56_base->type = devid & 0xFF;
 
+       /* Silicon is now identified and booted so exit cache-only */
+       regcache_cache_only(cs35l56_base->regmap, false);
+
        ret = regmap_read(cs35l56_base->regmap, CS35L56_DSP_RESTRICT_STS1, &secured);
        if (ret) {
                dev_err(cs35l56_base->dev, "Get Secure status failed\n");
index 8d2f021fb362812a73d07d10565a23d2b77f6a38..4986e78105daf9ca949815835c94321ad19dfb68 100644 (file)
@@ -454,9 +454,14 @@ static int cs35l56_asp_dai_set_fmt(struct snd_soc_dai *codec_dai, unsigned int f
 {
        struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(codec_dai->component);
        unsigned int val;
+       int ret;
 
        dev_dbg(cs35l56->base.dev, "%s: %#x\n", __func__, fmt);
 
+       ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base);
+       if (ret)
+               return ret;
+
        switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) {
        case SND_SOC_DAIFMT_CBC_CFC:
                break;
@@ -530,6 +535,11 @@ static int cs35l56_asp_dai_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx
                                        unsigned int rx_mask, int slots, int slot_width)
 {
        struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component);
+       int ret;
+
+       ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base);
+       if (ret)
+               return ret;
 
        if ((slots == 0) || (slot_width == 0)) {
                dev_dbg(cs35l56->base.dev, "tdm config cleared\n");
@@ -578,6 +588,11 @@ static int cs35l56_asp_dai_hw_params(struct snd_pcm_substream *substream,
        struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component);
        unsigned int rate = params_rate(params);
        u8 asp_width, asp_wl;
+       int ret;
+
+       ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base);
+       if (ret)
+               return ret;
 
        asp_wl = params_width(params);
        if (cs35l56->asp_slot_width)
@@ -634,7 +649,11 @@ static int cs35l56_asp_dai_set_sysclk(struct snd_soc_dai *dai,
                                      int clk_id, unsigned int freq, int dir)
 {
        struct cs35l56_private *cs35l56 = snd_soc_component_get_drvdata(dai->component);
-       int freq_id;
+       int freq_id, ret;
+
+       ret = cs35l56_init_asp1_regs_for_driver_control(&cs35l56->base);
+       if (ret)
+               return ret;
 
        if (freq == 0) {
                cs35l56->sysclk_set = false;
@@ -1341,6 +1360,7 @@ static int cs35l56_try_get_broken_sdca_spkid_gpio(struct cs35l56_private *cs35l5
                                    "spk-id-gpios", ACPI_TYPE_PACKAGE, &obj);
        if (ret) {
                dev_dbg(cs35l56->base.dev, "Could not get spk-id-gpios package: %d\n", ret);
+               fwnode_handle_put(af01_fwnode);
                return -ENOENT;
        }
 
@@ -1348,6 +1368,7 @@ static int cs35l56_try_get_broken_sdca_spkid_gpio(struct cs35l56_private *cs35l5
        if (obj->package.count != 4) {
                dev_warn(cs35l56->base.dev, "Unexpected spk-id element count %d\n",
                         obj->package.count);
+               fwnode_handle_put(af01_fwnode);
                return -ENOENT;
        }
 
@@ -1362,6 +1383,7 @@ static int cs35l56_try_get_broken_sdca_spkid_gpio(struct cs35l56_private *cs35l5
                 */
                ret = acpi_dev_add_driver_gpios(adev, cs35l56_af01_spkid_gpios_mapping);
                if (ret) {
+                       fwnode_handle_put(af01_fwnode);
                        return dev_err_probe(cs35l56->base.dev, ret,
                                             "Failed to add gpio mapping to AF01\n");
                }
@@ -1369,14 +1391,17 @@ static int cs35l56_try_get_broken_sdca_spkid_gpio(struct cs35l56_private *cs35l5
                ret = devm_add_action_or_reset(cs35l56->base.dev,
                                               cs35l56_acpi_dev_release_driver_gpios,
                                               adev);
-               if (ret)
+               if (ret) {
+                       fwnode_handle_put(af01_fwnode);
                        return ret;
+               }
 
                dev_dbg(cs35l56->base.dev, "Added spk-id-gpios mapping to AF01\n");
        }
 
        desc = fwnode_gpiod_get_index(af01_fwnode, "spk-id", 0, GPIOD_IN, NULL);
        if (IS_ERR(desc)) {
+               fwnode_handle_put(af01_fwnode);
                ret = PTR_ERR(desc);
                return dev_err_probe(cs35l56->base.dev, ret, "Get GPIO from AF01 failed\n");
        }
@@ -1385,9 +1410,12 @@ static int cs35l56_try_get_broken_sdca_spkid_gpio(struct cs35l56_private *cs35l5
        gpiod_put(desc);
 
        if (ret < 0) {
+               fwnode_handle_put(af01_fwnode);
                dev_err_probe(cs35l56->base.dev, ret, "Error reading spk-id GPIO\n");
                return ret;
-               }
+       }
+
+       fwnode_handle_put(af01_fwnode);
 
        dev_info(cs35l56->base.dev, "Got spk-id from AF01\n");
 
@@ -1403,6 +1431,9 @@ int cs35l56_common_probe(struct cs35l56_private *cs35l56)
        cs35l56->base.cal_index = -1;
        cs35l56->speaker_id = -ENOENT;
 
+       /* Assume that the firmware owns ASP1 until we know different */
+       cs35l56->base.fw_owns_asp1 = true;
+
        dev_set_drvdata(cs35l56->base.dev, cs35l56);
 
        cs35l56_fill_supply_names(cs35l56->supplies);
@@ -1531,6 +1562,8 @@ post_soft_reset:
                        return ret;
 
                dev_dbg(cs35l56->base.dev, "Firmware rebooted after soft reset\n");
+
+               regcache_cache_only(cs35l56->base.regmap, false);
        }
 
        /* Disable auto-hibernate so that runtime_pm has control */
index 6bc068cdcbe2a89e1488530bfa4e6e9c49c0651c..15e5e3eb592b3008fb47ea4372fed71a08375f01 100644 (file)
@@ -671,8 +671,10 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
                return NULL;
 
        aad_pdata = devm_kzalloc(dev, sizeof(*aad_pdata), GFP_KERNEL);
-       if (!aad_pdata)
+       if (!aad_pdata) {
+               fwnode_handle_put(aad_np);
                return NULL;
+       }
 
        aad_pdata->irq = i2c->irq;
 
@@ -753,6 +755,8 @@ static struct da7219_aad_pdata *da7219_aad_fw_to_pdata(struct device *dev)
        else
                aad_pdata->adc_1bit_rpt = DA7219_AAD_ADC_1BIT_RPT_1;
 
+       fwnode_handle_put(aad_np);
+
        return aad_pdata;
 }
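
The added fwnode_handle_put() calls all enforce the same ownership rule: a node returned by device_get_named_child_node() carries a reference that must be dropped on every exit path, not only on success. A generic sketch of that rule; the function name is hypothetical and the property read is illustrative only:

    #include <linux/property.h>

    static int example_parse_child(struct device *dev)
    {
            struct fwnode_handle *child;
            u32 val;
            int ret;

            child = device_get_named_child_node(dev, "da7219_aad");
            if (!child)
                    return -ENODEV;

            ret = fwnode_property_read_u32(child, "dlg,micbias-pulse-lvl", &val);

            fwnode_handle_put(child);       /* release the reference on success and failure alike */
            return ret;
    }
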
 
index e3ba04484813a21904727a09725c608bf27c34a2..d0d24a53df7462cfe88d5d2457078fc1f144f22b 100644 (file)
@@ -444,6 +444,7 @@ struct rt5645_priv {
        struct regmap *regmap;
        struct i2c_client *i2c;
        struct gpio_desc *gpiod_hp_det;
+       struct gpio_desc *gpiod_cbj_sleeve;
        struct snd_soc_jack *hp_jack;
        struct snd_soc_jack *mic_jack;
        struct snd_soc_jack *btn_jack;
@@ -3186,6 +3187,9 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
                regmap_update_bits(rt5645->regmap, RT5645_IN1_CTRL2,
                        RT5645_CBJ_MN_JD, 0);
 
+               if (rt5645->gpiod_cbj_sleeve)
+                       gpiod_set_value(rt5645->gpiod_cbj_sleeve, 1);
+
                msleep(600);
                regmap_read(rt5645->regmap, RT5645_IN1_CTRL3, &val);
                val &= 0x7;
@@ -3202,6 +3206,8 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
                        snd_soc_dapm_disable_pin(dapm, "Mic Det Power");
                        snd_soc_dapm_sync(dapm);
                        rt5645->jack_type = SND_JACK_HEADPHONE;
+                       if (rt5645->gpiod_cbj_sleeve)
+                               gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
                }
                if (rt5645->pdata.level_trigger_irq)
                        regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
@@ -3229,6 +3235,9 @@ static int rt5645_jack_detect(struct snd_soc_component *component, int jack_inse
                if (rt5645->pdata.level_trigger_irq)
                        regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
                                RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
+
+               if (rt5645->gpiod_cbj_sleeve)
+                       gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
        }
 
        return rt5645->jack_type;
@@ -4012,6 +4021,16 @@ static int rt5645_i2c_probe(struct i2c_client *i2c)
                        return ret;
        }
 
+       rt5645->gpiod_cbj_sleeve = devm_gpiod_get_optional(&i2c->dev, "cbj-sleeve",
+                                                          GPIOD_OUT_LOW);
+
+       if (IS_ERR(rt5645->gpiod_cbj_sleeve)) {
+               ret = PTR_ERR(rt5645->gpiod_cbj_sleeve);
+               dev_info(&i2c->dev, "failed to initialize gpiod, ret=%d\n", ret);
+               if (ret != -ENOENT)
+                       return ret;
+       }
+
        for (i = 0; i < ARRAY_SIZE(rt5645->supplies); i++)
                rt5645->supplies[i].supply = rt5645_supply_names[i];
 
@@ -4259,6 +4278,9 @@ static void rt5645_i2c_remove(struct i2c_client *i2c)
        cancel_delayed_work_sync(&rt5645->jack_detect_work);
        cancel_delayed_work_sync(&rt5645->rcclock_work);
 
+       if (rt5645->gpiod_cbj_sleeve)
+               gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
+
        regulator_bulk_disable(ARRAY_SIZE(rt5645->supplies), rt5645->supplies);
 }
 
@@ -4274,6 +4296,9 @@ static void rt5645_i2c_shutdown(struct i2c_client *i2c)
                0);
        msleep(20);
        regmap_write(rt5645->regmap, RT5645_RESET, 0);
+
+       if (rt5645->gpiod_cbj_sleeve)
+               gpiod_set_value(rt5645->gpiod_cbj_sleeve, 0);
 }
 
 static int __maybe_unused rt5645_sys_suspend(struct device *dev)
index 3fb7b9adb61de628705d784fbe64e259bf031089..bc3579203c7a4770cb657b22965a12bc2becefe8 100644 (file)
@@ -316,7 +316,7 @@ static int rt715_sdca_set_amp_gain_8ch_get(struct snd_kcontrol *kcontrol,
        return 0;
 }
 
-static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -17625, 375, 0);
+static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -1725, 75, 0);
 static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, 0, 1000, 0);
 
 static int rt715_sdca_get_volsw(struct snd_kcontrol *kcontrol,
@@ -477,7 +477,7 @@ static const struct snd_kcontrol_new rt715_sdca_snd_controls[] = {
                        RT715_SDCA_FU_VOL_CTRL, CH_01),
                SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC7_27_VOL,
                        RT715_SDCA_FU_VOL_CTRL, CH_02),
-                       0x2f, 0x7f, 0,
+                       0x2f, 0x3f, 0,
                rt715_sdca_set_amp_gain_get, rt715_sdca_set_amp_gain_put,
                in_vol_tlv),
        RT715_SDCA_EXT_TLV("FU02 Capture Volume",
@@ -485,13 +485,13 @@ static const struct snd_kcontrol_new rt715_sdca_snd_controls[] = {
                        RT715_SDCA_FU_VOL_CTRL, CH_01),
                rt715_sdca_set_amp_gain_4ch_get,
                rt715_sdca_set_amp_gain_4ch_put,
-               in_vol_tlv, 4, 0x7f),
+               in_vol_tlv, 4, 0x3f),
        RT715_SDCA_EXT_TLV("FU06 Capture Volume",
                SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_ADC10_11_VOL,
                        RT715_SDCA_FU_VOL_CTRL, CH_01),
                rt715_sdca_set_amp_gain_4ch_get,
                rt715_sdca_set_amp_gain_4ch_put,
-               in_vol_tlv, 4, 0x7f),
+               in_vol_tlv, 4, 0x3f),
        /* MIC Boost Control */
        RT715_SDCA_BOOST_EXT_TLV("FU0E Boost",
                SDW_SDCA_CTL(FUN_MIC_ARRAY, RT715_SDCA_FU_DMIC_GAIN_EN,
index 7e13868ff99f03110c165dcd706cff46a8eeba5d..f012fe0ded6d2871217f5fa02c49ee0158ad5101 100644 (file)
@@ -111,6 +111,7 @@ static bool rt715_readable_register(struct device *dev, unsigned int reg)
        case 0x839d:
        case 0x83a7:
        case 0x83a9:
+       case 0x752001:
        case 0x752039:
                return true;
        default:
index e0ea3a23f7cc6844691338ff8daae7f2843d2c6e..e5bd9ef812de13378801d82356b54ee4144f219a 100644 (file)
@@ -1330,7 +1330,7 @@ static struct snd_soc_dai_driver rt722_sdca_dai[] = {
                .capture = {
                        .stream_name = "DP6 DMic Capture",
                        .channels_min = 1,
-                       .channels_max = 2,
+                       .channels_max = 4,
                        .rates = RT722_STEREO_RATES,
                        .formats = RT722_FORMATS,
                },
@@ -1439,9 +1439,12 @@ static void rt722_sdca_jack_preset(struct rt722_sdca_priv *rt722)
        int loop_check, chk_cnt = 100, ret;
        unsigned int calib_status = 0;
 
-       /* Read eFuse */
-       rt722_sdca_index_write(rt722, RT722_VENDOR_SPK_EFUSE, RT722_DC_CALIB_CTRL,
-               0x4808);
+       /* Config analog bias */
+       rt722_sdca_index_write(rt722, RT722_VENDOR_REG, RT722_ANALOG_BIAS_CTL3,
+               0xa081);
+       /* GE related settings */
+       rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_GE_RELATED_CTL2,
+               0xa009);
        /* Button A, B, C, D bypass mode */
        rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_UMP_HID_CTL4,
                0xcf00);
@@ -1475,9 +1478,6 @@ static void rt722_sdca_jack_preset(struct rt722_sdca_priv *rt722)
                if ((calib_status & 0x0040) == 0x0)
                        break;
        }
-       /* Release HP-JD, EN_CBJ_TIE_GL/R open, en_osw gating auto done bit */
-       rt722_sdca_index_write(rt722, RT722_VENDOR_REG, RT722_DIGITAL_MISC_CTRL4,
-               0x0010);
        /* Set ADC09 power entity floating control */
        rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_ADC0A_08_PDE_FLOAT_CTL,
                0x2a12);
@@ -1490,8 +1490,21 @@ static void rt722_sdca_jack_preset(struct rt722_sdca_priv *rt722)
        /* Set DAC03 and HP power entity floating control */
        rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_DAC03_HP_PDE_FLOAT_CTL,
                0x4040);
+       rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_ENT_FLOAT_CTRL_1,
+               0x4141);
+       rt722_sdca_index_write(rt722, RT722_VENDOR_HDA_CTL, RT722_FLOAT_CTRL_1,
+               0x0101);
        /* Fine tune PDE40 latency */
        regmap_write(rt722->regmap, 0x2f58, 0x07);
+       regmap_write(rt722->regmap, 0x2f03, 0x06);
+       /* MIC VRefo */
+       rt722_sdca_index_update_bits(rt722, RT722_VENDOR_REG,
+               RT722_COMBO_JACK_AUTO_CTL1, 0x0200, 0x0200);
+       rt722_sdca_index_update_bits(rt722, RT722_VENDOR_REG,
+               RT722_VREFO_GAT, 0x4000, 0x4000);
+       /* Release HP-JD, EN_CBJ_TIE_GL/R open, en_osw gating auto done bit */
+       rt722_sdca_index_write(rt722, RT722_VENDOR_REG, RT722_DIGITAL_MISC_CTRL4,
+               0x0010);
 }
 
 int rt722_sdca_io_init(struct device *dev, struct sdw_slave *slave)
index 44af8901352eb6817e9a6dddd76fc7325a3660ba..2464361a7958c6904d1eca01fb148166a4743eff 100644 (file)
@@ -69,6 +69,7 @@ struct rt722_sdca_dmic_kctrl_priv {
 #define RT722_COMBO_JACK_AUTO_CTL2             0x46
 #define RT722_COMBO_JACK_AUTO_CTL3             0x47
 #define RT722_DIGITAL_MISC_CTRL4               0x4a
+#define RT722_VREFO_GAT                                0x63
 #define RT722_FSM_CTL                          0x67
 #define RT722_SDCA_INTR_REC                    0x82
 #define RT722_SW_CONFIG1                       0x8a
@@ -127,6 +128,8 @@ struct rt722_sdca_dmic_kctrl_priv {
 #define RT722_UMP_HID_CTL6                     0x66
 #define RT722_UMP_HID_CTL7                     0x67
 #define RT722_UMP_HID_CTL8                     0x68
+#define RT722_FLOAT_CTRL_1                     0x70
+#define RT722_ENT_FLOAT_CTRL_1         0x76
 
 /* Parameter & Verb control 01 (0x1a)(NID:20h) */
 #define RT722_HIDDEN_REG_SW_RESET (0x1 << 14)
index 3c025dabaf7a47f180b6f5bdaf6d6f67d128956d..1253695bebd863cab4accfef0616250fb3006830 100644 (file)
@@ -1155,6 +1155,7 @@ static int wsa881x_probe(struct sdw_slave *pdev,
        pdev->prop.sink_ports = GENMASK(WSA881X_MAX_SWR_PORTS, 0);
        pdev->prop.sink_dpn_prop = wsa_sink_dpn_prop;
        pdev->prop.scp_int1_mask = SDW_SCP_INT1_BUS_CLASH | SDW_SCP_INT1_PARITY;
+       pdev->prop.clk_stop_mode1 = true;
        gpiod_direction_output(wsa881x->sd_n, !wsa881x->sd_n_val);
 
        wsa881x->regmap = devm_regmap_init_sdw(pdev, &wsa881x_regmap_config);
index 9d9921e1cd4dedcb2ff4f8e9b59a3b4560d7fa4c..d2554c8577326b533764862829e84b8e001f7016 100644 (file)
@@ -64,7 +64,7 @@ struct avs_icl_memwnd2_desc {
 struct avs_icl_memwnd2 {
        union {
                struct avs_icl_memwnd2_desc slot_desc[AVS_ICL_MEMWND2_SLOTS_COUNT];
-               u8 rsvd[PAGE_SIZE];
+               u8 rsvd[SZ_4K];
        };
        u8 slot_array[AVS_ICL_MEMWND2_SLOTS_COUNT][PAGE_SIZE];
 } __packed;
index 13061bd1488bb4dd506b14aed5c87967bc19aee6..42b42903ae9de7919c6620a5bfba2ef3b54ffb2c 100644 (file)
@@ -1582,6 +1582,8 @@ static int avs_widget_load(struct snd_soc_component *comp, int index,
        if (!le32_to_cpu(dw->priv.size))
                return 0;
 
+       w->no_wname_in_kcontrol_name = true;
+
        if (w->ignore_suspend && !AVS_S0IX_SUPPORTED) {
                dev_info_once(comp->dev, "Device does not support S0IX, check BIOS settings\n");
                w->ignore_suspend = false;
index 05f38d1f7d824dc0f46c5262e0f6616dce1b7411..b41a1147f1c3455de581c729efc72517b35bc6c7 100644 (file)
@@ -636,28 +636,30 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
                                        BYT_RT5640_USE_AMCR0F28),
        },
        {
+               /* Asus T100TAF, unlike other T100TA* models this one has a mono speaker */
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
                },
                .driver_data = (void *)(BYT_RT5640_IN1_MAP |
                                        BYT_RT5640_JD_SRC_JD2_IN4N |
                                        BYT_RT5640_OVCD_TH_2000UA |
                                        BYT_RT5640_OVCD_SF_0P75 |
+                                       BYT_RT5640_MONO_SPEAKER |
+                                       BYT_RT5640_DIFF_MIC |
+                                       BYT_RT5640_SSP0_AIF2 |
                                        BYT_RT5640_MCLK_EN),
        },
        {
+               /* Asus T100TA and T100TAM, must come after T100TAF (mono spk) match */
                .matches = {
-                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
-                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF"),
+                       DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"),
                },
                .driver_data = (void *)(BYT_RT5640_IN1_MAP |
                                        BYT_RT5640_JD_SRC_JD2_IN4N |
                                        BYT_RT5640_OVCD_TH_2000UA |
                                        BYT_RT5640_OVCD_SF_0P75 |
-                                       BYT_RT5640_MONO_SPEAKER |
-                                       BYT_RT5640_DIFF_MIC |
-                                       BYT_RT5640_SSP0_AIF2 |
                                        BYT_RT5640_MCLK_EN),
        },
        {
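
The reordering above works because DMI_MATCH() is a substring comparison while DMI_EXACT_MATCH() requires the full string, and the quirk table is searched for the first hit; a plain "T100TA" entry listed first would also capture the mono-speaker T100TAF. A reduced illustration of that ordering rule (not the actual table, which also carries vendor matches and driver_data):

    #include <linux/dmi.h>

    static const struct dmi_system_id example_order[] = {
            {
                    /* exact product name: only the mono-speaker T100TAF */
                    .matches = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TAF") },
            },
            {
                    /* substring match: T100TA and T100TAM (would also hit
                     * T100TAF, hence it must come second) */
                    .matches = { DMI_MATCH(DMI_PRODUCT_NAME, "T100TA") },
            },
            { }
    };
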
index b93ea33739f29ddd9b987d7896e9fa2f37c58ccf..6458d5dc4902f665211bb9e4ae7d274e4bff2fdc 100644 (file)
@@ -99,6 +99,7 @@ config SND_MESON_AXG_PDM
 
 config SND_MESON_CARD_UTILS
        tristate
+       select SND_DYNAMIC_MINORS
 
 config SND_MESON_CODEC_GLUE
        tristate
index 3180aa4d3a157e2718b7b3b4bf34a239a68f33e1..8c5605c1e34e8a1bf29e4256ba9625b94205c762 100644 (file)
@@ -318,6 +318,7 @@ static int axg_card_add_link(struct snd_soc_card *card, struct device_node *np,
 
        dai_link->cpus = cpu;
        dai_link->num_cpus = 1;
+       dai_link->nonatomic = true;
 
        ret = meson_card_parse_dai(card, np, dai_link->cpus);
        if (ret)
index bebee0ca8e3889a920a486b01c1a8730eb1988d4..ecb3eb7a9723ddf60e565b95e46b7f31ce1544cb 100644 (file)
@@ -204,18 +204,26 @@ static irqreturn_t axg_fifo_pcm_irq_block(int irq, void *dev_id)
        unsigned int status;
 
        regmap_read(fifo->map, FIFO_STATUS1, &status);
-
        status = FIELD_GET(STATUS1_INT_STS, status);
+       axg_fifo_ack_irq(fifo, status);
+
+       /* Use the thread to call period elapsed on nonatomic links */
        if (status & FIFO_INT_COUNT_REPEAT)
-               snd_pcm_period_elapsed(ss);
-       else
-               dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
-                       status);
+               return IRQ_WAKE_THREAD;
 
-       /* Ack irqs */
-       axg_fifo_ack_irq(fifo, status);
+       dev_dbg(axg_fifo_dev(ss), "unexpected irq - STS 0x%02x\n",
+               status);
+
+       return IRQ_NONE;
+}
+
+static irqreturn_t axg_fifo_pcm_irq_block_thread(int irq, void *dev_id)
+{
+       struct snd_pcm_substream *ss = dev_id;
+
+       snd_pcm_period_elapsed(ss);
 
-       return IRQ_RETVAL(status);
+       return IRQ_HANDLED;
 }
 
 int axg_fifo_pcm_open(struct snd_soc_component *component,
@@ -243,8 +251,9 @@ int axg_fifo_pcm_open(struct snd_soc_component *component,
        if (ret)
                return ret;
 
-       ret = request_irq(fifo->irq, axg_fifo_pcm_irq_block, 0,
-                         dev_name(dev), ss);
+       ret = request_threaded_irq(fifo->irq, axg_fifo_pcm_irq_block,
+                                  axg_fifo_pcm_irq_block_thread,
+                                  IRQF_ONESHOT, dev_name(dev), ss);
        if (ret)
                return ret;
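
Because the card links are now nonatomic (see the axg_card_add_link change above), snd_pcm_period_elapsed() can no longer be called from hard interrupt context, so the FIFO IRQ is split: the hard handler only acks the status and decides, and the period notification runs in the IRQ thread. The shape of that split, reduced to its essentials (a sketch with hypothetical names, not the driver source):

    #include <linux/interrupt.h>
    #include <sound/pcm.h>

    static irqreturn_t example_hard_handler(int irq, void *dev_id)
    {
            /* read and ack the interrupt source here (hard IRQ context) */
            return IRQ_WAKE_THREAD;         /* or IRQ_NONE if the IRQ was not ours */
    }

    static irqreturn_t example_thread_fn(int irq, void *dev_id)
    {
            struct snd_pcm_substream *ss = dev_id;

            snd_pcm_period_elapsed(ss);     /* may take a mutex on nonatomic links */
            return IRQ_HANDLED;
    }

    /* registered as in the hunk above:
     * request_threaded_irq(irq, example_hard_handler, example_thread_fn,
     *                      IRQF_ONESHOT, dev_name(dev), ss);
     */
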
 
index 63333a2b0a9c363d6882d3fe50ecff78da390316..a6579efd37750db01add3c491bb7a8443a76dde7 100644 (file)
@@ -392,6 +392,46 @@ void axg_tdm_stream_free(struct axg_tdm_stream *ts)
 }
 EXPORT_SYMBOL_GPL(axg_tdm_stream_free);
 
+int axg_tdm_stream_set_cont_clocks(struct axg_tdm_stream *ts,
+                                  unsigned int fmt)
+{
+       int ret = 0;
+
+       if (fmt & SND_SOC_DAIFMT_CONT) {
+               /* Clocks are already enabled - skipping */
+               if (ts->clk_enabled)
+                       return 0;
+
+               ret = clk_prepare_enable(ts->iface->mclk);
+               if (ret)
+                       return ret;
+
+               ret = clk_prepare_enable(ts->iface->sclk);
+               if (ret)
+                       goto err_sclk;
+
+               ret = clk_prepare_enable(ts->iface->lrclk);
+               if (ret)
+                       goto err_lrclk;
+
+               ts->clk_enabled = true;
+               return 0;
+       }
+
+       /* Clocks are already disabled - skipping */
+       if (!ts->clk_enabled)
+               return 0;
+
+       clk_disable_unprepare(ts->iface->lrclk);
+err_lrclk:
+       clk_disable_unprepare(ts->iface->sclk);
+err_sclk:
+       clk_disable_unprepare(ts->iface->mclk);
+       ts->clk_enabled = false;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(axg_tdm_stream_set_cont_clocks);
+
 MODULE_DESCRIPTION("Amlogic AXG TDM formatter driver");
 MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
 MODULE_LICENSE("GPL v2");
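
axg_tdm_stream_set_cont_clocks() keys purely off SND_SOC_DAIFMT_CONT: with the flag set it prepares and keeps mclk/sclk/lrclk running, otherwise it releases them, and ts->clk_enabled keeps repeated calls balanced. A sketch mirroring how the interface driver in the next file hands it the negotiated format (function names hypothetical):

    #include "axg-tdm.h"

    static int example_hw_params(struct axg_tdm_iface *iface, struct axg_tdm_stream *ts)
    {
            /* keep the clocks up between streams if the link asked for CONT */
            return axg_tdm_stream_set_cont_clocks(ts, iface->fmt);
    }

    static int example_hw_free(struct axg_tdm_stream *ts)
    {
            /* a zero format carries no CONT bit, so the clocks are released */
            return axg_tdm_stream_set_cont_clocks(ts, 0);
    }
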
index bf708717635bf6531195a1039aee41c3166d8154..62057c71f742e68e4555428281157ebb265b1cc2 100644 (file)
@@ -309,6 +309,7 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
                                   struct snd_soc_dai *dai)
 {
        struct axg_tdm_iface *iface = snd_soc_dai_get_drvdata(dai);
+       struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
        int ret;
 
        switch (iface->fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
@@ -346,7 +347,11 @@ static int axg_tdm_iface_hw_params(struct snd_pcm_substream *substream,
                        return ret;
        }
 
-       return 0;
+       ret = axg_tdm_stream_set_cont_clocks(ts, iface->fmt);
+       if (ret)
+               dev_err(dai->dev, "failed to apply continuous clock setting\n");
+
+       return ret;
 }
 
 static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
@@ -354,19 +359,32 @@ static int axg_tdm_iface_hw_free(struct snd_pcm_substream *substream,
 {
        struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
 
-       /* Stop all attached formatters */
-       axg_tdm_stream_stop(ts);
-
-       return 0;
+       return axg_tdm_stream_set_cont_clocks(ts, 0);
 }
 
-static int axg_tdm_iface_prepare(struct snd_pcm_substream *substream,
+static int axg_tdm_iface_trigger(struct snd_pcm_substream *substream,
+                                int cmd,
                                 struct snd_soc_dai *dai)
 {
-       struct axg_tdm_stream *ts = snd_soc_dai_get_dma_data(dai, substream);
+       struct axg_tdm_stream *ts =
+               snd_soc_dai_get_dma_data(dai, substream);
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               axg_tdm_stream_start(ts);
+               break;
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+       case SNDRV_PCM_TRIGGER_STOP:
+               axg_tdm_stream_stop(ts);
+               break;
+       default:
+               return -EINVAL;
+       }
 
-       /* Force all attached formatters to update */
-       return axg_tdm_stream_reset(ts);
+       return 0;
 }
 
 static int axg_tdm_iface_remove_dai(struct snd_soc_dai *dai)
@@ -412,8 +430,8 @@ static const struct snd_soc_dai_ops axg_tdm_iface_ops = {
        .set_fmt        = axg_tdm_iface_set_fmt,
        .startup        = axg_tdm_iface_startup,
        .hw_params      = axg_tdm_iface_hw_params,
-       .prepare        = axg_tdm_iface_prepare,
        .hw_free        = axg_tdm_iface_hw_free,
+       .trigger        = axg_tdm_iface_trigger,
 };
 
 /* TDM Backend DAIs */
index 42f7470b9a7f41fd948948c54d58ea30c49831be..daaca10fec9e2bff82164ccccea75f1aefadaba1 100644 (file)
@@ -58,12 +58,17 @@ struct axg_tdm_stream {
        unsigned int physical_width;
        u32 *mask;
        bool ready;
+
+       /* For continuous clock tracking */
+       bool clk_enabled;
 };
 
 struct axg_tdm_stream *axg_tdm_stream_alloc(struct axg_tdm_iface *iface);
 void axg_tdm_stream_free(struct axg_tdm_stream *ts);
 int axg_tdm_stream_start(struct axg_tdm_stream *ts);
 void axg_tdm_stream_stop(struct axg_tdm_stream *ts);
+int axg_tdm_stream_set_cont_clocks(struct axg_tdm_stream *ts,
+                                  unsigned int fmt);
 
 static inline int axg_tdm_stream_reset(struct axg_tdm_stream *ts)
 {
index cc84d4c81be9d363d701b1d5c658e26a62079435..238bda5f6b76fa48b186514398840af05644f279 100644 (file)
@@ -350,7 +350,9 @@ static int sof_init_environment(struct snd_sof_dev *sdev)
        }
 
        ret = sof_select_ipc_and_paths(sdev);
-       if (!ret && plat_data->ipc_type != base_profile->ipc_type) {
+       if (ret) {
+               goto err_machine_check;
+       } else if (plat_data->ipc_type != base_profile->ipc_type) {
                /* IPC type changed, re-initialize the ops */
                sof_ops_free(sdev);
 
index 7c8aafca8fdef8eb65211dcd8cbead13077a6068..7275437ea8d8a59433e4aab0429acb4ce66cdf4a 100644 (file)
@@ -330,14 +330,32 @@ EXPORT_SYMBOL_GPL(snd_sof_dbg_memory_info_init);
 
 int snd_sof_dbg_init(struct snd_sof_dev *sdev)
 {
+       struct snd_sof_pdata *plat_data = sdev->pdata;
        struct snd_sof_dsp_ops *ops = sof_ops(sdev);
        const struct snd_sof_debugfs_map *map;
+       struct dentry *fw_profile;
        int i;
        int err;
 
        /* use "sof" as top level debugFS dir */
        sdev->debugfs_root = debugfs_create_dir("sof", NULL);
 
+       /* expose firmware/topology prefix/names for test purposes */
+       fw_profile = debugfs_create_dir("fw_profile", sdev->debugfs_root);
+
+       debugfs_create_str("fw_path", 0444, fw_profile,
+                          (char **)&plat_data->fw_filename_prefix);
+       debugfs_create_str("fw_lib_path", 0444, fw_profile,
+                          (char **)&plat_data->fw_lib_prefix);
+       debugfs_create_str("tplg_path", 0444, fw_profile,
+                          (char **)&plat_data->tplg_filename_prefix);
+       debugfs_create_str("fw_name", 0444, fw_profile,
+                          (char **)&plat_data->fw_filename);
+       debugfs_create_str("tplg_name", 0444, fw_profile,
+                          (char **)&plat_data->tplg_filename);
+       debugfs_create_u32("ipc_type", 0444, fw_profile,
+                          (u32 *)&plat_data->ipc_type);
+
        /* init dfsentry list */
        INIT_LIST_HEAD(&sdev->dfsentry_list);
 
index b26ffe767fab553467897b4facc13931668b27fe..b14e508f1f315b19585ad6851f8fc5f4b1d3891a 100644 (file)
@@ -35,6 +35,9 @@ static const struct sof_dev_desc lnl_desc = {
        .default_fw_path = {
                [SOF_IPC_TYPE_4] = "intel/sof-ipc4/lnl",
        },
+       .default_lib_path = {
+               [SOF_IPC_TYPE_4] = "intel/sof-ipc4-lib/lnl",
+       },
        .default_tplg_path = {
                [SOF_IPC_TYPE_4] = "intel/sof-ipc4-tplg",
        },
index 35769dd7905ebe6f66492f650a72d8624712f28a..af0bf354cb209e216b60ec0097ac88fb08b21475 100644 (file)
@@ -434,4 +434,5 @@ const struct sof_ipc_pcm_ops ipc3_pcm_ops = {
        .trigger = sof_ipc3_pcm_trigger,
        .dai_link_fixup = sof_ipc3_pcm_dai_link_fixup,
        .reset_hw_params_during_stop = true,
+       .d0i3_supported_in_s0ix = true,
 };
index e915f9f87a6c35d74f1cf7096accca70dce688da..4594470ed08b1e6e95af6e2aaa62da418281ce97 100644 (file)
@@ -37,6 +37,25 @@ struct sof_ipc4_timestamp_info {
        snd_pcm_sframes_t delay;
 };
 
+/**
+ * struct sof_ipc4_pcm_stream_priv - IPC4 specific private data
+ * @time_info: pointer to time info struct if it is supported, otherwise NULL
+ * @chain_dma_allocated: indicates the ChainDMA allocation state
+ */
+struct sof_ipc4_pcm_stream_priv {
+       struct sof_ipc4_timestamp_info *time_info;
+
+       bool chain_dma_allocated;
+};
+
+static inline struct sof_ipc4_timestamp_info *
+sof_ipc4_sps_to_time_info(struct snd_sof_pcm_stream *sps)
+{
+       struct sof_ipc4_pcm_stream_priv *stream_priv = sps->private;
+
+       return stream_priv->time_info;
+}
+
 static int sof_ipc4_set_multi_pipeline_state(struct snd_sof_dev *sdev, u32 state,
                                             struct ipc4_pipeline_set_state_data *trigger_list)
 {
@@ -253,14 +272,17 @@ sof_ipc4_update_pipeline_state(struct snd_sof_dev *sdev, int state, int cmd,
  */
 
 static int sof_ipc4_chain_dma_trigger(struct snd_sof_dev *sdev,
-                                     int direction,
+                                     struct snd_sof_pcm *spcm, int direction,
                                      struct snd_sof_pcm_stream_pipeline_list *pipeline_list,
                                      int state, int cmd)
 {
        struct sof_ipc4_fw_data *ipc4_data = sdev->private;
+       struct sof_ipc4_pcm_stream_priv *stream_priv;
        bool allocate, enable, set_fifo_size;
        struct sof_ipc4_msg msg = {{ 0 }};
-       int i;
+       int ret, i;
+
+       stream_priv = spcm->stream[direction].private;
 
        switch (state) {
        case SOF_IPC4_PIPE_RUNNING: /* Allocate and start chained dma */
@@ -281,6 +303,11 @@ static int sof_ipc4_chain_dma_trigger(struct snd_sof_dev *sdev,
                set_fifo_size = false;
                break;
        case SOF_IPC4_PIPE_RESET: /* Disable and free chained DMA. */
+
+               /* ChainDMA can only be reset if it has been allocated */
+               if (!stream_priv->chain_dma_allocated)
+                       return 0;
+
                allocate = false;
                enable = false;
                set_fifo_size = false;
@@ -338,7 +365,12 @@ static int sof_ipc4_chain_dma_trigger(struct snd_sof_dev *sdev,
        if (enable)
                msg.primary |= SOF_IPC4_GLB_CHAIN_DMA_ENABLE_MASK;
 
-       return sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+       ret = sof_ipc_tx_message_no_reply(sdev->ipc, &msg, 0);
+       /* Update the ChainDMA allocation state */
+       if (!ret)
+               stream_priv->chain_dma_allocated = allocate;
+
+       return ret;
 }
 
 static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
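With the per-stream private data in place, the ChainDMA trigger now tracks whether the DMA has actually been allocated: the RESET case returns early when chain_dma_allocated is false, so no free request is sent for a ChainDMA the firmware never set up, and the flag is only updated after the IPC message has been transmitted successfully.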
@@ -378,7 +410,7 @@ static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
         * trigger function that handles the rest for the substream.
         */
        if (pipeline->use_chain_dma)
-               return sof_ipc4_chain_dma_trigger(sdev, substream->stream,
+               return sof_ipc4_chain_dma_trigger(sdev, spcm, substream->stream,
                                                  pipeline_list, state, cmd);
 
        /* allocate memory for the pipeline data */
@@ -452,7 +484,7 @@ static int sof_ipc4_trigger_pipelines(struct snd_soc_component *component,
                 * Invalidate the stream_start_offset to make sure that it is
                 * going to be updated if the stream resumes
                 */
-               time_info = spcm->stream[substream->stream].private;
+               time_info = sof_ipc4_sps_to_time_info(&spcm->stream[substream->stream]);
                if (time_info)
                        time_info->stream_start_offset = SOF_IPC4_INVALID_STREAM_POSITION;
 
@@ -706,12 +738,16 @@ static int sof_ipc4_pcm_dai_link_fixup(struct snd_soc_pcm_runtime *rtd,
 static void sof_ipc4_pcm_free(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm)
 {
        struct snd_sof_pcm_stream_pipeline_list *pipeline_list;
+       struct sof_ipc4_pcm_stream_priv *stream_priv;
        int stream;
 
        for_each_pcm_streams(stream) {
                pipeline_list = &spcm->stream[stream].pipeline_list;
                kfree(pipeline_list->pipelines);
                pipeline_list->pipelines = NULL;
+
+               stream_priv = spcm->stream[stream].private;
+               kfree(stream_priv->time_info);
                kfree(spcm->stream[stream].private);
                spcm->stream[stream].private = NULL;
        }
@@ -721,7 +757,8 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm
 {
        struct snd_sof_pcm_stream_pipeline_list *pipeline_list;
        struct sof_ipc4_fw_data *ipc4_data = sdev->private;
-       struct sof_ipc4_timestamp_info *stream_info;
+       struct sof_ipc4_pcm_stream_priv *stream_priv;
+       struct sof_ipc4_timestamp_info *time_info;
        bool support_info = true;
        u32 abi_version;
        u32 abi_offset;
@@ -749,33 +786,41 @@ static int sof_ipc4_pcm_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm
                        return -ENOMEM;
                }
 
+               stream_priv = kzalloc(sizeof(*stream_priv), GFP_KERNEL);
+               if (!stream_priv) {
+                       sof_ipc4_pcm_free(sdev, spcm);
+                       return -ENOMEM;
+               }
+
+               spcm->stream[stream].private = stream_priv;
+
                if (!support_info)
                        continue;
 
-               stream_info = kzalloc(sizeof(*stream_info), GFP_KERNEL);
-               if (!stream_info) {
+               time_info = kzalloc(sizeof(*time_info), GFP_KERNEL);
+               if (!time_info) {
                        sof_ipc4_pcm_free(sdev, spcm);
                        return -ENOMEM;
                }
 
-               spcm->stream[stream].private = stream_info;
+               stream_priv->time_info = time_info;
        }
 
        return 0;
 }
 
-static void sof_ipc4_build_time_info(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *spcm)
+static void sof_ipc4_build_time_info(struct snd_sof_dev *sdev, struct snd_sof_pcm_stream *sps)
 {
        struct sof_ipc4_copier *host_copier = NULL;
        struct sof_ipc4_copier *dai_copier = NULL;
        struct sof_ipc4_llp_reading_slot llp_slot;
-       struct sof_ipc4_timestamp_info *info;
+       struct sof_ipc4_timestamp_info *time_info;
        struct snd_soc_dapm_widget *widget;
        struct snd_sof_dai *dai;
        int i;
 
        /* find host & dai to locate info in memory window */
-       for_each_dapm_widgets(spcm->list, i, widget) {
+       for_each_dapm_widgets(sps->list, i, widget) {
                struct snd_sof_widget *swidget = widget->dobj.private;
 
                if (!swidget)
@@ -795,44 +840,44 @@ static void sof_ipc4_build_time_info(struct snd_sof_dev *sdev, struct snd_sof_pc
                return;
        }
 
-       info = spcm->private;
-       info->host_copier = host_copier;
-       info->dai_copier = dai_copier;
-       info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_gpdma_reading_slots) +
-                                   sdev->fw_info_box.offset;
+       time_info = sof_ipc4_sps_to_time_info(sps);
+       time_info->host_copier = host_copier;
+       time_info->dai_copier = dai_copier;
+       time_info->llp_offset = offsetof(struct sof_ipc4_fw_registers,
+                                        llp_gpdma_reading_slots) + sdev->fw_info_box.offset;
 
        /* find llp slot used by current dai */
        for (i = 0; i < SOF_IPC4_MAX_LLP_GPDMA_READING_SLOTS; i++) {
-               sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
+               sof_mailbox_read(sdev, time_info->llp_offset, &llp_slot, sizeof(llp_slot));
                if (llp_slot.node_id == dai_copier->data.gtw_cfg.node_id)
                        break;
 
-               info->llp_offset += sizeof(llp_slot);
+               time_info->llp_offset += sizeof(llp_slot);
        }
 
        if (i < SOF_IPC4_MAX_LLP_GPDMA_READING_SLOTS)
                return;
 
        /* if no llp gpdma slot is used, check aggregated sdw slot */
-       info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_sndw_reading_slots) +
-                                       sdev->fw_info_box.offset;
+       time_info->llp_offset = offsetof(struct sof_ipc4_fw_registers,
+                                        llp_sndw_reading_slots) + sdev->fw_info_box.offset;
        for (i = 0; i < SOF_IPC4_MAX_LLP_SNDW_READING_SLOTS; i++) {
-               sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
+               sof_mailbox_read(sdev, time_info->llp_offset, &llp_slot, sizeof(llp_slot));
                if (llp_slot.node_id == dai_copier->data.gtw_cfg.node_id)
                        break;
 
-               info->llp_offset += sizeof(llp_slot);
+               time_info->llp_offset += sizeof(llp_slot);
        }
 
        if (i < SOF_IPC4_MAX_LLP_SNDW_READING_SLOTS)
                return;
 
        /* check EVAD slot */
-       info->llp_offset = offsetof(struct sof_ipc4_fw_registers, llp_evad_reading_slot) +
-                                       sdev->fw_info_box.offset;
-       sof_mailbox_read(sdev, info->llp_offset, &llp_slot, sizeof(llp_slot));
+       time_info->llp_offset = offsetof(struct sof_ipc4_fw_registers,
+                                        llp_evad_reading_slot) + sdev->fw_info_box.offset;
+       sof_mailbox_read(sdev, time_info->llp_offset, &llp_slot, sizeof(llp_slot));
        if (llp_slot.node_id != dai_copier->data.gtw_cfg.node_id)
-               info->llp_offset = 0;
+               time_info->llp_offset = 0;
 }
 
 static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
@@ -849,7 +894,7 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
        if (!spcm)
                return -EINVAL;
 
-       time_info = spcm->stream[substream->stream].private;
+       time_info = sof_ipc4_sps_to_time_info(&spcm->stream[substream->stream]);
        /* delay calculation is not supported by current fw_reg ABI */
        if (!time_info)
                return 0;
@@ -864,7 +909,7 @@ static int sof_ipc4_pcm_hw_params(struct snd_soc_component *component,
 
 static int sof_ipc4_get_stream_start_offset(struct snd_sof_dev *sdev,
                                            struct snd_pcm_substream *substream,
-                                           struct snd_sof_pcm_stream *stream,
+                                           struct snd_sof_pcm_stream *sps,
                                            struct sof_ipc4_timestamp_info *time_info)
 {
        struct sof_ipc4_copier *host_copier = time_info->host_copier;
@@ -918,7 +963,7 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
        struct sof_ipc4_timestamp_info *time_info;
        struct sof_ipc4_llp_reading_slot llp;
        snd_pcm_uframes_t head_cnt, tail_cnt;
-       struct snd_sof_pcm_stream *stream;
+       struct snd_sof_pcm_stream *sps;
        u64 dai_cnt, host_cnt, host_ptr;
        struct snd_sof_pcm *spcm;
        int ret;
@@ -927,8 +972,8 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
        if (!spcm)
                return -EOPNOTSUPP;
 
-       stream = &spcm->stream[substream->stream];
-       time_info = stream->private;
+       sps = &spcm->stream[substream->stream];
+       time_info = sof_ipc4_sps_to_time_info(sps);
        if (!time_info)
                return -EOPNOTSUPP;
 
@@ -938,7 +983,7 @@ static int sof_ipc4_pcm_pointer(struct snd_soc_component *component,
         * the statistics are complete, and it will not change after the first initialization.
         */
        if (time_info->stream_start_offset == SOF_IPC4_INVALID_STREAM_POSITION) {
-               ret = sof_ipc4_get_stream_start_offset(sdev, substream, stream, time_info);
+               ret = sof_ipc4_get_stream_start_offset(sdev, substream, sps, time_info);
                if (ret < 0)
                        return -EOPNOTSUPP;
        }
@@ -1030,15 +1075,13 @@ static snd_pcm_sframes_t sof_ipc4_pcm_delay(struct snd_soc_component *component,
 {
        struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream);
        struct sof_ipc4_timestamp_info *time_info;
-       struct snd_sof_pcm_stream *stream;
        struct snd_sof_pcm *spcm;
 
        spcm = snd_sof_find_spcm_dai(component, rtd);
        if (!spcm)
                return 0;
 
-       stream = &spcm->stream[substream->stream];
-       time_info = stream->private;
+       time_info = sof_ipc4_sps_to_time_info(&spcm->stream[substream->stream]);
        /*
         * Report the stored delay value calculated in the pointer callback.
         * In the unlikely event that the calculation was skipped/aborted, the
index f03cee94bce62642e3c419d4f956a2011ea4dd3f..8804e00e7251b9724054b119509698a3a12899bf 100644 (file)
@@ -325,14 +325,13 @@ static int sof_pcm_trigger(struct snd_soc_component *component,
                        ipc_first = true;
                break;
        case SNDRV_PCM_TRIGGER_SUSPEND:
-               if (sdev->system_suspend_target == SOF_SUSPEND_S0IX &&
+               /*
+                * If DSP D0I3 is allowed during S0iX, set the suspend_ignored flag for
+                * D0I3-compatible streams to keep the firmware pipeline running
+                */
+               if (pcm_ops && pcm_ops->d0i3_supported_in_s0ix &&
+                   sdev->system_suspend_target == SOF_SUSPEND_S0IX &&
                    spcm->stream[substream->stream].d0i3_compatible) {
-                       /*
-                        * trap the event, not sending trigger stop to
-                        * prevent the FW pipelines from being stopped,
-                        * and mark the flag to ignore the upcoming DAPM
-                        * PM events.
-                        */
                        spcm->stream[substream->stream].suspend_ignored = true;
                        return 0;
                }
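The S0ix suspend-ignore behaviour is now opt-in via the new pcm_ops->d0i3_supported_in_s0ix flag rather than unconditional; in this series only the IPC3 pcm_ops is shown setting it (see the ipc3-pcm hunk above), so platforms whose pcm_ops leave it clear will send the normal trigger-stop on suspend even for D0i3-compatible streams.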
index 86bbb531e142c72be1ca5d710c466d16c9058734..499b6084b52637f12623172b0b17d027a41c08d4 100644 (file)
@@ -116,6 +116,7 @@ struct snd_sof_dai_config_data {
  *                               triggers. The FW keeps the host DMA running in this case and
  *                               therefore the host must do the same and should stop the DMA during
  *                               hw_free.
+ * @d0i3_supported_in_s0ix: Allow DSP D0I3 during S0iX
  */
 struct sof_ipc_pcm_ops {
        int (*hw_params)(struct snd_soc_component *component, struct snd_pcm_substream *substream,
@@ -135,6 +136,7 @@ struct sof_ipc_pcm_ops {
        bool reset_hw_params_during_stop;
        bool ipc_first_on_start;
        bool platform_stop_during_hw_free;
+       bool d0i3_supported_in_s0ix;
 };
 
 /**
index aa37c4ab0adbd9b47d2228cb4977e78ebf97b576..21cd41fec7a9cb932ecdc479ca41394349c48ffc 100644 (file)
@@ -1,8 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright (c) 2020-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // tegra186_dspk.c - Tegra186 DSPK driver
-//
-// Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
 
 #include <linux/clk.h>
 #include <linux/device.h>
@@ -241,14 +240,14 @@ static int tegra186_dspk_hw_params(struct snd_pcm_substream *substream,
                return -EINVAL;
        }
 
-       cif_conf.client_bits = TEGRA_ACIF_BITS_24;
-
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
                cif_conf.audio_bits = TEGRA_ACIF_BITS_16;
+               cif_conf.client_bits = TEGRA_ACIF_BITS_16;
                break;
        case SNDRV_PCM_FORMAT_S32_LE:
                cif_conf.audio_bits = TEGRA_ACIF_BITS_32;
+               cif_conf.client_bits = TEGRA_ACIF_BITS_24;
                break;
        default:
                dev_err(dev, "unsupported format!\n");
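The ACIF client width is no longer hard-coded to 24 bits: for S16_LE the client configuration now matches the 16-bit sample format, while S32_LE keeps the 24-bit client setting, so only the 32-bit case retains the previous behaviour.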
index b892d66f78470a6430b7096efdafab202992dbd2..1e760c3155213db093ff09a12b922eb43c3ba4c4 100644 (file)
@@ -2417,12 +2417,6 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
 
        mcasp_reparent_fck(pdev);
 
-       ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
-                                             &davinci_mcasp_dai[mcasp->op_mode], 1);
-
-       if (ret != 0)
-               goto err;
-
        ret = davinci_mcasp_get_dma_type(mcasp);
        switch (ret) {
        case PCM_EDMA:
@@ -2449,6 +2443,12 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
                goto err;
        }
 
+       ret = devm_snd_soc_register_component(&pdev->dev, &davinci_mcasp_component,
+                                             &davinci_mcasp_dai[mcasp->op_mode], 1);
+
+       if (ret != 0)
+               goto err;
+
 no_audio:
        ret = davinci_mcasp_init_gpiochip(mcasp);
        if (ret) {
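Registering the McASP component is moved from early in probe to after the DMA type has been resolved and the rest of the setup has run, so the component only becomes visible to the ASoC core once the device is ready to be used; registering it earlier likely left a window where binding could race with the remainder of probe.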
index 39ad96a18123f626c311aaa2fdf8213594b7d67d..edcd26106557b478eea1527077ffde97347c9012 100644 (file)
@@ -205,6 +205,9 @@ __weak noinline struct file *bpf_testmod_return_ptr(int arg)
        case 5: return (void *)~(1ull << 30);   /* trigger extable */
        case 6: return &f;                      /* valid addr */
        case 7: return (void *)((long)&f | 1);  /* kernel tricks */
+#ifdef CONFIG_X86_64
+       case 8: return (void *)VSYSCALL_ADDR;   /* vsyscall page address */
+#endif
        default: return NULL;
        }
 }
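The extra case in bpf_testmod_return_ptr() hands back the x86-64 vsyscall page address so the pointer-returning test cases also cover that fixed, kernel-defined address, a useful stress case for probe reads; VSYSCALL_ADDR is only defined on x86-64, hence the CONFIG_X86_64 guard.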
index eef816b80993f528569f0de79b5f5e5fcda0e289..ca917c71ff602da5ab0805c4cee51e0df5d185e5 100644 (file)
@@ -84,6 +84,18 @@ static struct vm_gic vm_gic_create_with_vcpus(uint32_t gic_dev_type,
        return v;
 }
 
+static struct vm_gic vm_gic_create_barebones(uint32_t gic_dev_type)
+{
+       struct vm_gic v;
+
+       v.gic_dev_type = gic_dev_type;
+       v.vm = vm_create_barebones();
+       v.gic_fd = kvm_create_device(v.vm, gic_dev_type);
+
+       return v;
+}
+
+
 static void vm_gic_destroy(struct vm_gic *v)
 {
        close(v->gic_fd);
@@ -357,6 +369,40 @@ static void test_vcpus_then_vgic(uint32_t gic_dev_type)
        vm_gic_destroy(&v);
 }
 
+#define KVM_VGIC_V2_ATTR(offset, cpu) \
+       (FIELD_PREP(KVM_DEV_ARM_VGIC_OFFSET_MASK, offset) | \
+        FIELD_PREP(KVM_DEV_ARM_VGIC_CPUID_MASK, cpu))
+
+#define GIC_CPU_CTRL   0x00
+
+static void test_v2_uaccess_cpuif_no_vcpus(void)
+{
+       struct vm_gic v;
+       u64 val = 0;
+       int ret;
+
+       v = vm_gic_create_barebones(KVM_DEV_TYPE_ARM_VGIC_V2);
+       subtest_dist_rdist(&v);
+
+       ret = __kvm_has_device_attr(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+                                   KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0));
+       TEST_ASSERT(ret && errno == EINVAL,
+                   "accessed non-existent CPU interface, want errno: %i",
+                   EINVAL);
+       ret = __kvm_device_attr_get(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+                                   KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
+       TEST_ASSERT(ret && errno == EINVAL,
+                   "accessed non-existent CPU interface, want errno: %i",
+                   EINVAL);
+       ret = __kvm_device_attr_set(v.gic_fd, KVM_DEV_ARM_VGIC_GRP_CPU_REGS,
+                                   KVM_VGIC_V2_ATTR(GIC_CPU_CTRL, 0), &val);
+       TEST_ASSERT(ret && errno == EINVAL,
+                   "accessed non-existent CPU interface, want errno: %i",
+                   EINVAL);
+
+       vm_gic_destroy(&v);
+}
+
 static void test_v3_new_redist_regions(void)
 {
        struct kvm_vcpu *vcpus[NR_VCPUS];
@@ -675,6 +721,9 @@ void run_tests(uint32_t gic_dev_type)
        test_vcpus_then_vgic(gic_dev_type);
        test_vgic_then_vcpus(gic_dev_type);
 
+       if (VGIC_DEV_IS_V2(gic_dev_type))
+               test_v2_uaccess_cpuif_no_vcpus();
+
        if (VGIC_DEV_IS_V3(gic_dev_type)) {
                test_v3_new_redist_regions();
                test_v3_typer_accesses();
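The new test_v2_uaccess_cpuif_no_vcpus() case creates a barebones VM with a vGICv2 but no vCPUs, then checks that has/get/set accesses to GIC_CPU_CTRL in the KVM_DEV_ARM_VGIC_GRP_CPU_REGS group fail with EINVAL rather than succeeding against a CPU interface that does not exist; it is wired into run_tests() for v2 devices only, mirroring the existing v3-only block.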