Peter Oruba <peter.oruba@amd.com>
Pratyush Anand <pratyush.anand@gmail.com> <pratyush.anand@st.com>
Praveen BP <praveenbp@ti.com>
+Punit Agrawal <punitagrawal@gmail.com> <punit.agrawal@arm.com>
Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Oleksij Rempel <linux@rempel-privat.de> <bug-track@fisher-privat.net>
Oleksij Rempel <linux@rempel-privat.de> <external.Oleksij.Rempel@de.bosch.com>
0-| / \/ \/
+---0----1----2----3----4----5----6------------> time (s)
- 2. To make the LED go instantly from one brigntess value to another,
- we should use use zero-time lengths (the brightness must be same as
+ 2. To make the LED go instantly from one brightness value to another,
+ we should use zero-time lengths (the brightness must be same as
the previous tuple's). So the format should be:
"brightness_1 duration_1 brightness_1 0 brightness_2 duration_2
brightness_2 0 ...". For example:
compatible = "renesas,r8a77470"
- RZ/G2M (R8A774A1)
compatible = "renesas,r8a774a1"
- - RZ/G2E (RA8774C0)
+ - RZ/G2E (R8A774C0)
compatible = "renesas,r8a774c0"
- R-Car M1A (R8A77781)
compatible = "renesas,r8a7778"
I2C for OMAP platforms
Required properties :
-- compatible : Must be "ti,omap2420-i2c", "ti,omap2430-i2c", "ti,omap3-i2c"
- or "ti,omap4-i2c"
+- compatible : Must be
+ "ti,omap2420-i2c" for OMAP2420 SoCs
+ "ti,omap2430-i2c" for OMAP2430 SoCs
+ "ti,omap3-i2c" for OMAP3 SoCs
+ "ti,omap4-i2c" for OMAP4+ SoCs
+ "ti,am654-i2c", "ti,omap4-i2c" for AM654 SoCs
- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
- #address-cells = <1>;
- #size-cells = <0>;
--- /dev/null
+Kernel driver i2c-nvidia-gpu
+
+Datasheet: not publicly available.
+
+Authors:
+ Ajay Gupta <ajayg@nvidia.com>
+
+Description
+-----------
+
+i2c-nvidia-gpu is a driver for the I2C controller included in NVIDIA Turing
+and later GPUs; it is used to communicate with the Type-C controller on these GPUs.
+
+If your 'lspci -v' listing shows something like the following,
+
+01:00.3 Serial bus controller [0c80]: NVIDIA Corporation Device 1ad9 (rev a1)
+
+then this driver should support the I2C controller of your GPU.
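Once the driver is bound, the controller shows up as a regular I2C adapter,
e.g. under /sys/bus/i2c/devices or in the output of 'i2cdetect -l' (the exact
adapter name is assigned by the driver and is not reproduced here).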
____________________________________________________________|___________________________________________________________
| | | |
ffff800000000000 | -128 TB | ffff87ffffffffff | 8 TB | ... guard hole, also reserved for hypervisor
- ffff880000000000 | -120 TB | ffffc7ffffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
- ffffc80000000000 | -56 TB | ffffc8ffffffffff | 1 TB | ... unused hole
+ ffff880000000000 | -120 TB | ffff887fffffffff | 0.5 TB | LDT remap for PTI
+ ffff888000000000 | -119.5 TB | ffffc87fffffffff | 64 TB | direct mapping of all physical memory (page_offset_base)
+ ffffc88000000000 | -55.5 TB | ffffc8ffffffffff | 0.5 TB | ... unused hole
ffffc90000000000 | -55 TB | ffffe8ffffffffff | 32 TB | vmalloc/ioremap space (vmalloc_base)
ffffe90000000000 | -23 TB | ffffe9ffffffffff | 1 TB | ... unused hole
ffffea0000000000 | -22 TB | ffffeaffffffffff | 1 TB | virtual memory map (vmemmap_base)
ffffeb0000000000 | -21 TB | ffffebffffffffff | 1 TB | ... unused hole
ffffec0000000000 | -20 TB | fffffbffffffffff | 16 TB | KASAN shadow memory
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | LDT remap for PTI
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
__________________|____________|__________________|_________|____________________________________________________________
|
- | Identical layout to the 47-bit one from here on:
+ | Identical layout to the 56-bit one from here on:
____________________________________________________________|____________________________________________________________
| | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
__________________|____________|__________________|_________|___________________________________________________________
| | | |
0000800000000000 | +64 PB | ffff7fffffffffff | ~16K PB | ... huge, still almost 64 bits wide hole of non-canonical
- | | | | virtual memory addresses up to the -128 TB
+ | | | | virtual memory addresses up to the -64 PB
| | | | starting offset of kernel mappings.
__________________|____________|__________________|_________|___________________________________________________________
|
____________________________________________________________|___________________________________________________________
| | | |
ff00000000000000 | -64 PB | ff0fffffffffffff | 4 PB | ... guard hole, also reserved for hypervisor
- ff10000000000000 | -60 PB | ff8fffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
- ff90000000000000 | -28 PB | ff9fffffffffffff | 4 PB | LDT remap for PTI
+ ff10000000000000 | -60 PB | ff10ffffffffffff | 0.25 PB | LDT remap for PTI
+ ff11000000000000 | -59.75 PB | ff90ffffffffffff | 32 PB | direct mapping of all physical memory (page_offset_base)
+ ff91000000000000 | -27.75 PB | ff9fffffffffffff | 3.75 PB | ... unused hole
ffa0000000000000 | -24 PB | ffd1ffffffffffff | 12.5 PB | vmalloc/ioremap space (vmalloc_base)
ffd2000000000000 | -11.5 PB | ffd3ffffffffffff | 0.5 PB | ... unused hole
ffd4000000000000 | -11 PB | ffd5ffffffffffff | 0.5 PB | virtual memory map (vmemmap_base)
ffd6000000000000 | -10.5 PB | ffdeffffffffffff | 2.25 PB | ... unused hole
ffdf000000000000 | -8.25 PB | fffffdffffffffff | ~8 PB | KASAN shadow memory
- fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
- | | | | vaddr_end for KASLR
- fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
- fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
- ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
__________________|____________|__________________|_________|____________________________________________________________
|
| Identical layout to the 47-bit one from here on:
____________________________________________________________|____________________________________________________________
| | | |
+ fffffc0000000000 | -4 TB | fffffdffffffffff | 2 TB | ... unused hole
+ | | | | vaddr_end for KASLR
+ fffffe0000000000 | -2 TB | fffffe7fffffffff | 0.5 TB | cpu_entry_area mapping
+ fffffe8000000000 | -1.5 TB | fffffeffffffffff | 0.5 TB | ... unused hole
+ ffffff0000000000 | -1 TB | ffffff7fffffffff | 0.5 TB | %esp fixup stacks
ffffff8000000000 | -512 GB | ffffffeeffffffff | 444 GB | ... unused hole
ffffffef00000000 | -68 GB | fffffffeffffffff | 64 GB | EFI region mapping space
ffffffff00000000 | -4 GB | ffffffff7fffffff | 2 GB | ... unused hole
0C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits
140/080 ALL edid_info Video mode setup (struct edid_info)
1C0/020 ALL efi_info EFI 32 information (struct efi_info)
-1E0/004 ALL alk_mem_k Alternative mem check, in KB
+1E0/004 ALL alt_mem_k Alternative mem check, in KB
1E4/004 ALL scratch Scratch field for the kernel setup code
1E8/001 ALL e820_entries Number of entries in e820_table (below)
1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below)
HID CORE LAYER
M: Jiri Kosina <jikos@kernel.org>
-R: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
L: linux-input@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
S: Maintained
F: drivers/hid/
F: include/linux/hid*
S: Maintained
F: drivers/i2c/i2c-core-acpi.c
+I2C CONTROLLER DRIVER FOR NVIDIA GPU
+M: Ajay Gupta <ajayg@nvidia.com>
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/i2c/busses/i2c-nvidia-gpu
+F: drivers/i2c/busses/i2c-nvidia-gpu.c
+
I2C MUXES
M: Peter Rosin <peda@axentia.se>
L: linux-i2c@vger.kernel.org
LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
M: Viresh Kumar <vireshk@kernel.org>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: include/linux/pata_arasan_cf_data.h
F: drivers/ata/pata_arasan_cf.c
LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/pata_ftide010.c
F: drivers/ata/sata_gemini.c
LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
M: Mikael Pettersson <mikpelinux@gmail.com>
L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
S: Maintained
F: drivers/ata/sata_promise.*
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
+OMAP I2C DRIVER
+M: Vignesh R <vigneshr@ti.com>
+L: linux-omap@vger.kernel.org
+L: linux-i2c@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/i2c/i2c-omap.txt
+F: drivers/i2c/busses/i2c-omap.c
+
OMAP IMAGING SUBSYSTEM (OMAP3 ISP and OMAP4 ISS)
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
M: Jiri Kosina <jikos@kernel.org>
-R: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+M: Benjamin Tissoires <benjamin.tissoires@redhat.com>
L: linux-usb@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/hid/hid.git
S: Maintained
F: Documentation/hid/hiddev.txt
F: drivers/hid/usbhid/
})
#define user_termios_to_kernel_termios(k, u) \
- copy_from_user(k, u, sizeof(struct termios))
+ copy_from_user(k, u, sizeof(struct termios2))
#define kernel_termios_to_user_termios(u, k) \
+ copy_to_user(u, k, sizeof(struct termios2))
+
+#define user_termios_to_kernel_termios_1(k, u) \
+ copy_from_user(k, u, sizeof(struct termios))
+
+#define kernel_termios_to_user_termios_1(u, k) \
copy_to_user(u, k, sizeof(struct termios))
#endif /* _ALPHA_TERMIOS_H */
#define TCXONC _IO('t', 30)
#define TCFLSH _IO('t', 31)
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
+
#define TIOCSWINSZ _IOW('t', 103, struct winsize)
#define TIOCGWINSZ _IOR('t', 104, struct winsize)
#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
speed_t c_ospeed; /* output speed */
};
+/* Alpha has identical termios and termios2 */
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_cc[NCCS]; /* control characters */
+ cc_t c_line; /* line discipline (== c_cc[19]) */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
/* Alpha has matching termios and ktermios */
struct ktermios {
#define B3000000 00034
#define B3500000 00035
#define B4000000 00036
+#define BOTHER 00037
#define CSIZE 00001400
#define CS5 00000000
#define CMSPAR 010000000000 /* mark or space (stick) parity */
#define CRTSCTS 020000000000 /* flow control */
+#define CIBAUD 07600000
+#define IBSHIFT 16
+
/* c_lflag bits */
#define ISIG 0x00000080
#define ICANON 0x00000100
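Together, these additions give Alpha the same arbitrary-baud-rate interface the
other architectures already have. A minimal userspace sketch of how the new
ioctls are used (not part of this patch; set_custom_baud is a made-up helper
name, the CBAUD mask comes from the same termbits header, and the 74880 rate
is illustrative):

  #include <sys/ioctl.h>
  #include <asm/termbits.h>	/* struct termios2, BOTHER, IBSHIFT, CBAUD */

  /* Set a non-standard baud rate, e.g. 74880, via TCGETS2/TCSETS2. */
  static int set_custom_baud(int fd, unsigned int baud)
  {
  	struct termios2 tio;

  	if (ioctl(fd, TCGETS2, &tio) < 0)
  		return -1;
  	tio.c_cflag &= ~(CBAUD | CIBAUD);		/* drop Bxxx speed bits */
  	tio.c_cflag |= BOTHER | (BOTHER << IBSHIFT);	/* use c_*speed fields */
  	tio.c_ispeed = baud;
  	tio.c_ospeed = baud;
  	return ioctl(fd, TCSETS2, &tio);
  }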
};
chosen {
- stdout-path = "&uart1:115200n8";
+ stdout-path = "serial0:115200n8";
};
memory@70000000 {
i2c1: i2c@21a0000 {
#address-cells = <1>;
#size-cells = <0>;
- compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c";
+ compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c";
reg = <0x021a0000 0x4000>;
interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SLL_CLK_I2C1>;
regulator-name = "enet_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
- gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ gpio = <&gpio2 6 GPIO_ACTIVE_LOW>;
+ regulator-boot-on;
+ regulator-always-on;
};
reg_pcie_gpio: regulator-pcie-gpio {
phy-supply = <®_enet_3v3>;
phy-mode = "rgmii";
phy-handle = <ðphy1>;
+ phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
status = "okay";
mdio {
MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081
MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081
MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91
+ /* phy reset */
+ MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0
>;
};
compatible = "fsl,vf610m4";
chosen {
- bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw";
- stdout-path = "&uart2";
+ bootargs = "clk_ignore_unused init=/linuxrc rw";
+ stdout-path = "serial2:115200";
};
memory@8c000000 {
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
CONFIG_CGROUPS=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EMBEDDED=y
#ifndef _ASM_PGTABLE_2LEVEL_H
#define _ASM_PGTABLE_2LEVEL_H
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Hardware-wise, we have a two level page table structure, where the first
hvc #0
ldmfd sp!, {r0 - r3}
b cpu_v7_switch_mm
-ENDPROC(cpu_v7_smc_switch_mm)
+ENDPROC(cpu_v7_hvc_switch_mm)
#endif
ENTRY(cpu_v7_iciallu_switch_mm)
mov r3, #0
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
clock-names = "stmmaceth";
tx-fifo-depth = <16384>;
rx-fifo-depth = <16384>;
+ snps,multicast-filter-bins = <256>;
status = "disabled";
};
clock-names = "fck", "brg_int", "scif_clk";
dmas = <&dmac1 0x35>, <&dmac1 0x34>,
<&dmac2 0x35>, <&dmac2 0x34>;
- dma-names = "tx", "rx";
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7795_PD_ALWAYS_ON>;
resets = <&cpg 518>;
status = "disabled";
aliases {
serial0 = &scif0;
- ethernet0 = &avb;
+ ethernet0 = &gether;
};
chosen {
};
};
-&avb {
- pinctrl-0 = <&avb_pins>;
- pinctrl-names = "default";
-
- phy-mode = "rgmii-id";
- phy-handle = <&phy0>;
- renesas,no-ether-link;
- status = "okay";
-
- phy0: ethernet-phy@0 {
- rxc-skew-ps = <1500>;
- reg = <0>;
- interrupt-parent = <&gpio1>;
- interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
- };
-};
-
&canfd {
pinctrl-0 = <&canfd0_pins>;
pinctrl-names = "default";
clock-frequency = <32768>;
};
+&gether {
+ pinctrl-0 = <&gether_pins>;
+ pinctrl-names = "default";
+
+ phy-mode = "rgmii-id";
+ phy-handle = <&phy0>;
+ renesas,no-ether-link;
+ status = "okay";
+
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <1500>;
+ reg = <0>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <23 IRQ_TYPE_LEVEL_LOW>;
+ };
+};
+
&i2c0 {
pinctrl-0 = <&i2c0_pins>;
pinctrl-names = "default";
};
&pfc {
- avb_pins: avb {
- groups = "avb_mdio", "avb_rgmii";
- function = "avb";
- };
-
canfd0_pins: canfd0 {
groups = "canfd0_data_a";
function = "canfd0";
};
+ gether_pins: gether {
+ groups = "gether_mdio_a", "gether_rgmii",
+ "gether_txcrefclk", "gether_txcrefclk_mega";
+ function = "gether";
+ };
+
i2c0_pins: i2c0 {
groups = "i2c0";
function = "i2c0";
#define KERNEL_DS UL(-1)
#define USER_DS (TASK_SIZE_64 - 1)
+/*
+ * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
+ * no point in shifting all network buffers by 2 bytes just to make some IP
+ * header fields appear aligned in memory, potentially sacrificing some DMA
+ * performance on some platforms.
+ */
+#define NET_IP_ALIGN 0
+
#ifndef __ASSEMBLY__
#ifdef __KERNEL__
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
dma_contiguous_reserve(arm64_dma_phys_limit);
-
- memblock_allow_resize();
}
void __init bootmem_init(void)
memblock_free(__pa_symbol(init_pg_dir),
__pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir));
+
+ memblock_allow_resize();
}
/*
*/
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE 16
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 2048
#elif defined(CONFIG_COLDFIRE)
#define PTRS_PER_PTE 512
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define PTRS_PER_PMD 1
#define PTRS_PER_PGD 1024
#else
#include <asm-generic/4level-fixup.h>
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
void (*cvmx_override_ipd_port_setup) (int ipd_port);
/* Port count per interface */
-static int interface_port_count[5];
+static int interface_port_count[9];
/**
* Return the number of interfaces the chip has. Each interface
void *ret;
ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
- if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
+ if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) {
dma_cache_wback_inv((unsigned long) ret, size);
ret = (void *)UNCAC_ADDR(ret);
}
#ifndef _ASMNDS32_PGTABLE_H
#define _ASMNDS32_PGTABLE_H
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#include <asm-generic/4level-fixup.h>
#include <asm-generic/sizes.h>
#if CONFIG_PGTABLE_LEVELS == 3
#define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
#else
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
#define BITS_PER_PMD 0
#endif
#define PTRS_PER_PMD (1UL << BITS_PER_PMD)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g)
KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,))
UTS_MACHINE := s390x
-STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384)
+STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384)
CHECKFLAGS += -D__s390__ -D__s390x__
export LD_BFD
OBJECTS := $(addprefix $(obj)/,$(obj-y))
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T
-$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS)
+$(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE
$(call if_changed,ld)
-OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info
+OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load
$(obj)/info.bin: vmlinux FORCE
$(call if_changed,objcopy)
suffix-$(CONFIG_KERNEL_LZO) := .lzo
suffix-$(CONFIG_KERNEL_XZ) := .xz
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
$(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
$(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y)
+$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,xzkern)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
CONFIG_PREEMPT=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_KEXEC_FILE=y
+CONFIG_EXPOLINE=y
+CONFIG_EXPOLINE_AUTO=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_CHSC_SCH=y
+CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_BINFMT_MISC=m
CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
CONFIG_NF_CT_NETLINK=m
CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
-CONFIG_NFT_EXTHDR=m
-CONFIG_NFT_META=m
CONFIG_NFT_CT=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NET_ACT_CSUM=m
CONFIG_DNS_RESOLVER=y
CONFIG_OPENVSWITCH=m
+CONFIG_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
CONFIG_CGROUP_NET_PRIO=y
CONFIG_BPF_JIT=y
CONFIG_PPPOL2TP=m
CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
+CONFIG_ISM=m
CONFIG_INPUT_EVDEV=y
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
CONFIG_MLX5_INFINIBAND=m
CONFIG_VFIO=m
CONFIG_VFIO_PCI=m
+CONFIG_VFIO_MDEV=m
+CONFIG_VFIO_MDEV_DEVICE=m
CONFIG_VIRTIO_PCI=m
CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
+CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_KVM=m
CONFIG_KVM_S390_UCONTROL=y
CONFIG_VHOST_NET=m
+CONFIG_VHOST_VSOCK=m
CONFIG_CGROUP_PERF=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BPF_SYSCALL=y
CONFIG_USERFAULTFD=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_LIVEPATCH=y
+CONFIG_NR_CPUS=256
+CONFIG_NUMA=y
+CONFIG_HZ_100=y
+CONFIG_KEXEC_FILE=y
+CONFIG_CRASH_DUMP=y
+CONFIG_HIBERNATION=y
+CONFIG_PM_DEBUG=y
+CONFIG_CMM=m
CONFIG_OPROFILE=y
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
CONFIG_DEFAULT_DEADLINE=y
-CONFIG_LIVEPATCH=y
-CONFIG_NR_CPUS=256
-CONFIG_NUMA=y
-CONFIG_HZ_100=y
-CONFIG_KEXEC_FILE=y
+CONFIG_BINFMT_MISC=m
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
CONFIG_IDLE_PAGE_TRACKING=y
-CONFIG_CRASH_DUMP=y
-CONFIG_BINFMT_MISC=m
-CONFIG_HIBERNATION=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_BLK_DEV_RAM=y
CONFIG_VIRTIO_BLK=y
CONFIG_SCSI=y
+# CONFIG_SCSI_MQ_DEFAULT is not set
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_ST=y
CONFIG_BLK_DEV_SR=y
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_AURORA is not set
# CONFIG_NET_VENDOR_CORTINA is not set
# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_NET_VENDOR_SOCIONEXT is not set
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_HUGETLBFS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_INFO_DWARF4=y
-CONFIG_GDB_SCRIPTS=y
-CONFIG_UNUSED_SYMBOLS=y
-CONFIG_DEBUG_SECTION_MISMATCH=y
-CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_DETECT_HUNG_TASK=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_PROVE_LOCKING=y
-CONFIG_LOCK_STAT=y
-CONFIG_DEBUG_LOCKDEP=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_SG=y
-CONFIG_DEBUG_NOTIFIERS=y
-CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_LATENCYTOP=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CFB=m
CONFIG_CRYPTO_CTS=m
CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_XTS=m
CONFIG_CRYPTO_CMAC=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_ZCRYPT=m
-CONFIG_ZCRYPT_MULTIDEVNODES=y
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
CONFIG_CRYPTO_SHA1_S390=m
# CONFIG_XZ_DEC_ARM is not set
# CONFIG_XZ_DEC_ARMTHUMB is not set
# CONFIG_XZ_DEC_SPARC is not set
-CONFIG_CMM=m
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+CONFIG_GDB_SCRIPTS=y
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PROVE_LOCKING=y
+CONFIG_LOCK_STAT=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_RCU_CPU_STALL_TIMEOUT=60
+CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_FUNCTION_PROFILER=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_S390_PTDUMP=y
mm->context.asce_limit = STACK_TOP_MAX;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION3;
- /* pgd_alloc() did not account this pud */
- mm_inc_nr_puds(mm);
break;
case -PAGE_SIZE:
/* forked 5-level task, set new asce with new_mm->pgd */
/* forked 2-level compat task, set new asce with new mm->pgd */
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
- /* pgd_alloc() did not account this pmd */
- mm_inc_nr_pmds(mm);
- mm_inc_nr_puds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
- if (mm->context.asce_limit <= _REGION3_SIZE)
+ if (mm_pmd_folded(mm))
return _SEGMENT_ENTRY_EMPTY;
- if (mm->context.asce_limit <= _REGION2_SIZE)
+ if (mm_pud_folded(mm))
return _REGION3_ENTRY_EMPTY;
- if (mm->context.asce_limit <= _REGION1_SIZE)
+ if (mm_p4d_folded(mm))
return _REGION2_ENTRY_EMPTY;
return _REGION1_ENTRY_EMPTY;
}
_REGION_ENTRY_PROTECT | \
_REGION_ENTRY_NOEXEC)
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION1_SIZE;
+}
+#define mm_p4d_folded(mm) mm_p4d_folded(mm)
+
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION2_SIZE;
+}
+#define mm_pud_folded(mm) mm_pud_folded(mm)
+
+static inline bool mm_pmd_folded(struct mm_struct *mm)
+{
+ return mm->context.asce_limit <= _REGION3_SIZE;
+}
+#define mm_pmd_folded(mm) mm_pmd_folded(mm)
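+/*
+ * Note: the self-referential #defines above let generic headers detect
+ * these architecture overrides via #ifndef and skip their fallbacks.
+ */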
+
static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
return sp;
}
-static __no_sanitize_address_or_inline unsigned short stap(void)
+static __no_kasan_or_inline unsigned short stap(void)
{
unsigned short cpu_address;
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
-static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask)
+static __no_kasan_or_inline void __load_psw_mask(unsigned long mask)
{
unsigned long addr;
psw_t psw;
* General size of kernel stacks
*/
#ifdef CONFIG_KASAN
-#define THREAD_SIZE_ORDER 3
+#define THREAD_SIZE_ORDER 4
#else
#define THREAD_SIZE_ORDER 2
#endif
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION3_SIZE)
+ if (mm_pmd_folded(tlb->mm))
return;
pgtable_pmd_page_dtor(virt_to_page(pmd));
tlb_remove_table(tlb, pmd);
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION1_SIZE)
+ if (mm_p4d_folded(tlb->mm))
return;
tlb_remove_table(tlb, p4d);
}
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
unsigned long address)
{
- if (tlb->mm->context.asce_limit <= _REGION2_SIZE)
+ if (mm_pud_folded(tlb->mm))
return;
tlb_remove_table(tlb, pud);
}
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
lghi %r4,__TASK_stack
lghi %r1,__TASK_thread
- lg %r5,0(%r4,%r3) # start of kernel stack of next
+ llill %r5,STACK_INIT
stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
- lgr %r15,%r5
- aghi %r15,STACK_INIT # end of kernel stack of next
+ lg %r15,0(%r4,%r3) # start of kernel stack of next
+ agr %r15,%r5 # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
return -ENOENT;
if (ev > PERF_CPUM_CF_MAX_CTR)
- return -EINVAL;
+ return -ENOENT;
/* Obtain the counter set to which the specified counter belongs */
set = get_counter_set(ev);
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
-static struct attribute *cpumsf_pmu_events_attr[] = {
- CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC),
- NULL,
- NULL,
+/* Attribute list for CPU_SF.
+ *
+ * The availability depends on the CPU_MF sampling facility authorization
+ * for basic + diagnostic samples. This is determined at initialization
+ * time by the sampling facility device driver.
+ * If the authorization for basic samples is turned off, it should
+ * also be turned off for diagnostic sampling.
+ *
+ * During initialization, the device driver checks the authorization
+ * level for diagnostic sampling and installs the attribute
+ * file for diagnostic sampling if necessary.
+ *
+ * For now install a placeholder to reference all possible attributes:
+ * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG.
+ * Add another entry for the final NULL pointer.
+ */
+enum {
+ SF_CYCLES_BASIC_ATTR_IDX = 0,
+ SF_CYCLES_BASIC_DIAG_ATTR_IDX,
+ SF_CYCLES_ATTR_MAX
+};
+
+static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = {
+ [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC)
};
PMU_FORMAT_ATTR(event, "config:0-63");
if (si.ad) {
sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB);
- cpumsf_pmu_events_attr[1] =
+ /* Sampling of diagnostic data authorized,
+ * install event into attribute list of PMU device.
+ */
+ cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] =
CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG);
}
$(obj)/vdso32_wrapper.o : $(obj)/vdso32.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32)
+$(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE
$(call if_changed,vdso32ld)
# strip rule for the .so file
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso32): %.o: %.S
+$(obj-vdso32): %.o: %.S FORCE
$(call if_changed_dep,vdso32as)
# actual build commands
quiet_cmd_vdso32ld = VDSO32L $@
- cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso32as = VDSO32A $@
cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $<
$(obj)/vdso64_wrapper.o : $(obj)/vdso64.so
# link rule for the .so file, .lds has to be first
-$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64)
+$(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE
$(call if_changed,vdso64ld)
# strip rule for the .so file
$(call if_changed,objcopy)
# assembly rules for the .S files
-$(obj-vdso64): %.o: %.S
+$(obj-vdso64): %.o: %.S FORCE
$(call if_changed_dep,vdso64as)
# actual build commands
quiet_cmd_vdso64ld = VDSO64L $@
- cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@
quiet_cmd_vdso64as = VDSO64A $@
cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $<
* uncompressed image info used by the decompressor
* it should match struct vmlinux_info
*/
- .vmlinux.info 0 : {
+ .vmlinux.info 0 (INFO) : {
QUAD(_stext) /* default_lma */
QUAD(startup_continue) /* entry */
QUAD(__bss_start - _stext) /* image_size */
QUAD(__bss_stop - __bss_start) /* bss_size */
QUAD(__boot_data_start) /* bootdata_off */
QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */
- }
+ } :NONE
/* Debugging sections. */
STABS_DEBUG
mm->context.asce_limit = _REGION1_SIZE;
mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm_inc_nr_puds(mm);
} else {
crst_table_init(table, _REGION1_ENTRY_EMPTY);
pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd);
{
return mode->distance ? mode->distance(a, b) : 0;
}
+EXPORT_SYMBOL(__node_distance);
int numa_debug_enabled;
io_req->fds[0] = dev->cow.fd;
else
io_req->fds[0] = dev->fd;
+ io_req->error = 0;
if (req_op(req) == REQ_OP_FLUSH) {
io_req->op = UBD_FLUSH;
io_req->cow_offset = -1;
io_req->offset = off;
io_req->length = bvec->bv_len;
- io_req->error = 0;
io_req->sector_mask = 0;
-
io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
io_req->offsets[0] = 0;
io_req->offsets[1] = dev->cow.data_offset;
static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
+ struct ubd *ubd_dev = hctx->queue->queuedata;
struct request *req = bd->rq;
int ret = 0;
blk_mq_start_request(req);
+ spin_lock_irq(&ubd_dev->lock);
+
if (req_op(req) == REQ_OP_FLUSH) {
ret = ubd_queue_one_vec(hctx, req, 0, NULL);
} else {
}
}
out:
- if (ret < 0) {
+ spin_unlock_irq(&ubd_dev->lock);
+
+ if (ret < 0)
blk_mq_requeue_request(req, true);
- }
+
return BLK_STS_OK;
}
bool "ScaleMP vSMP"
select HYPERVISOR_GUEST
select PARAVIRT
- select PARAVIRT_XXL
depends on X86_64 && PCI
depends on X86_EXTENDED_PLATFORM
depends on SMP
KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000)
endif
-# Speed up the build
-KBUILD_CFLAGS += -pipe
# Workaround for a gcc prelease that unfortunately was shipped in a suse release
KBUILD_CFLAGS += -Wno-sign-compare
#
archmacros:
$(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s
-ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,-
+ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s
export ASM_MACRO_FLAGS
KBUILD_CFLAGS += $(ASM_MACRO_FLAGS)
int mce_available(struct cpuinfo_x86 *c);
bool mce_is_memory_error(struct mce *m);
+bool mce_is_correctable(struct mce *m);
+int mce_usable_address(struct mce *m);
DECLARE_PER_CPU(unsigned, mce_exception_count);
DECLARE_PER_CPU(unsigned, mce_poll_count);
: "cc");
}
#endif
- return hv_status;
+ return hv_status;
}
/*
/*
* Set __PAGE_OFFSET to the most negative possible address +
- * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a
- * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's
- * what Xen requires.
+ * PGDIR_SIZE*17 (pgd slot 273).
+ *
+ * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for
+ * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary,
+ * but it's what Xen requires.
*/
-#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL)
-#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL)
+#define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL)
+#define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL)
#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
#define __PAGE_OFFSET page_offset_base
*/
#define MAXMEM (1UL << MAX_PHYSMEM_BITS)
-#define LDT_PGD_ENTRY_L4 -3UL
-#define LDT_PGD_ENTRY_L5 -112UL
-#define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4)
+#define LDT_PGD_ENTRY -240UL
#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT)
#define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE)
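Arithmetic check on the single constant: with 4-level paging PGDIR_SHIFT is 39,
so -240UL << 39 is -120 TB, i.e. ffff880000000000; with 5-level paging
PGDIR_SHIFT is 48, so -240UL << 48 is -60 PB, i.e. ff10000000000000. Both match
the new "LDT remap for PTI" rows in the memory maps above, which is why one
constant can replace the old per-paging-mode LDT_PGD_ENTRY_L4/_L5 pair.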
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
- u32 val = 0;
-
- if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
- "I", _Q_PENDING_OFFSET))
- val |= _Q_PENDING_VAL;
+ u32 val;
+ /*
+	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
+	 * combined with CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
+ * statement expression, which GCC doesn't like.
+ */
+ val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
+ "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
return val;
#include <linux/mm.h>
#include <linux/device.h>
-#include <linux/uaccess.h>
+#include <asm/extable.h>
#include <asm/page.h>
#include <asm/pgtable.h>
*/
static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
{
- return __put_user(val, (unsigned long __user *)addr);
+ int ret = 0;
+
+ asm volatile("1: mov %[val], %[ptr]\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: sub $1, %[ret]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : [ret] "+r" (ret), [ptr] "=m" (*addr)
+ : [val] "r" (val));
+
+ return ret;
}
-static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val)
+static inline int xen_safe_read_ulong(const unsigned long *addr,
+ unsigned long *val)
{
- return __get_user(*val, (unsigned long __user *)addr);
+ int ret = 0;
+ unsigned long rval = ~0ul;
+
+ asm volatile("1: mov %[ptr], %[rval]\n"
+ "2:\n"
+ ".section .fixup, \"ax\"\n"
+ "3: sub $1, %[ret]\n"
+ " jmp 2b\n"
+ ".previous\n"
+ _ASM_EXTABLE(1b, 3b)
+ : [ret] "+r" (ret), [rval] "+r" (rval)
+ : [ptr] "m" (*addr));
+ *val = rval;
+
+ return ret;
}
#ifdef CONFIG_XEN_PV
* be somewhat complicated (e.g. segment offset would require an instruction
 * parser). So only support physical addresses up to page granularity for now.
*/
-static int mce_usable_address(struct mce *m)
+int mce_usable_address(struct mce *m)
{
if (!(m->status & MCI_STATUS_ADDRV))
return 0;
return 1;
}
+EXPORT_SYMBOL_GPL(mce_usable_address);
bool mce_is_memory_error(struct mce *m)
{
}
EXPORT_SYMBOL_GPL(mce_is_memory_error);
-static bool mce_is_correctable(struct mce *m)
+bool mce_is_correctable(struct mce *m)
{
if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED)
return false;
return true;
}
+EXPORT_SYMBOL_GPL(mce_is_correctable);
static bool cec_add_mce(struct mce *m)
{
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kexec.h>
+#include <linux/i8253.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
if (efi_enabled(EFI_BOOT))
x86_platform.get_nmi_reason = hv_get_nmi_reason;
+ /*
+ * Hyper-V VMs have a PIT emulation quirk such that zeroing the
+ * counter register during PIT shutdown restarts the PIT. So it
+	 * continues to interrupt at 18.2 Hz. Setting i8253_clear_counter
+	 * to false tells pit_shutdown() not to zero the counter so that
+	 * the PIT really is shut down. Generation 2 VMs don't have a PIT,
+ * and setting this value has no effect.
+ */
+ i8253_clear_counter_on_shutdown = false;
+
#if IS_ENABLED(CONFIG_HYPERV)
/*
* Setup the hook to get control post apic initialization.
}
early_param("no-vmw-sched-clock", setup_vmw_sched_clock);
-static unsigned long long vmware_sched_clock(void)
+static unsigned long long notrace vmware_sched_clock(void)
{
unsigned long long ns;
/*
* If PTI is enabled, this maps the LDT into the kernelmode and
* usermode tables for the given mm.
- *
- * There is no corresponding unmap function. Even if the LDT is freed, we
- * leave the PTEs around until the slot is reused or the mm is destroyed.
- * This is harmless: the LDT is always in ordinary memory, and no one will
- * access the freed slot.
- *
- * If we wanted to unmap freed LDTs, we'd also need to do a flush to make
- * it useful, and the flush would slow down modify_ldt().
*/
static int
map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot)
unsigned long va;
bool is_vmalloc;
spinlock_t *ptl;
- pgd_t *pgd;
- int i;
+ int i, nr_pages;
if (!static_cpu_has(X86_FEATURE_PTI))
return 0;
/* Check if the current mappings are sane */
sanity_check_ldt_mapping(mm);
- /*
- * Did we already have the top level entry allocated? We can't
- * use pgd_none() for this because it doens't do anything on
- * 4-level page table kernels.
- */
- pgd = pgd_offset(mm, LDT_BASE_ADDR);
-
is_vmalloc = is_vmalloc_addr(ldt->entries);
- for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) {
+ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+ for (i = 0; i < nr_pages; i++) {
unsigned long offset = i << PAGE_SHIFT;
const void *src = (char *)ldt->entries + offset;
unsigned long pfn;
/* Propagate LDT mapping to the user page-table */
map_ldt_struct_to_user(mm);
- va = (unsigned long)ldt_slot_va(slot);
- flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false);
-
ldt->slot = slot;
return 0;
}
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+ unsigned long va;
+ int i, nr_pages;
+
+ if (!ldt)
+ return;
+
+ /* LDT map/unmap is only required for PTI */
+ if (!static_cpu_has(X86_FEATURE_PTI))
+ return;
+
+ nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE);
+
+ for (i = 0; i < nr_pages; i++) {
+ unsigned long offset = i << PAGE_SHIFT;
+ spinlock_t *ptl;
+ pte_t *ptep;
+
+ va = (unsigned long)ldt_slot_va(ldt->slot) + offset;
+ ptep = get_locked_pte(mm, va, &ptl);
+ pte_clear(mm, va, ptep);
+ pte_unmap_unlock(ptep, ptl);
+ }
+
+ va = (unsigned long)ldt_slot_va(ldt->slot);
+ flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false);
+}
+
#else /* !CONFIG_PAGE_TABLE_ISOLATION */
static int
{
return 0;
}
+
+static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt)
+{
+}
#endif /* CONFIG_PAGE_TABLE_ISOLATION */
static void free_ldt_pgtables(struct mm_struct *mm)
}
install_ldt(mm, new_ldt);
+ unmap_ldt_struct(mm, old_ldt);
free_ldt_struct(old_ldt);
error = 0;
#define TOPOLOGY_REGISTER_OFFSET 0x10
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL
-/*
- * Interrupt control on vSMPowered systems:
- * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
- * and vice versa.
- */
-
-asmlinkage __visible unsigned long vsmp_save_fl(void)
-{
- unsigned long flags = native_save_fl();
-
- if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC))
- flags &= ~X86_EFLAGS_IF;
- return flags;
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl);
-
-__visible void vsmp_restore_fl(unsigned long flags)
-{
- if (flags & X86_EFLAGS_IF)
- flags &= ~X86_EFLAGS_AC;
- else
- flags |= X86_EFLAGS_AC;
- native_restore_fl(flags);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl);
-
-asmlinkage __visible void vsmp_irq_disable(void)
-{
- unsigned long flags = native_save_fl();
-
- native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC);
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable);
-
-asmlinkage __visible void vsmp_irq_enable(void)
-{
- unsigned long flags = native_save_fl();
-
- native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC));
-}
-PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable);
-
-static unsigned __init vsmp_patch(u8 type, void *ibuf,
- unsigned long addr, unsigned len)
-{
- switch (type) {
- case PARAVIRT_PATCH(irq.irq_enable):
- case PARAVIRT_PATCH(irq.irq_disable):
- case PARAVIRT_PATCH(irq.save_fl):
- case PARAVIRT_PATCH(irq.restore_fl):
- return paravirt_patch_default(type, ibuf, addr, len);
- default:
- return native_patch(type, ibuf, addr, len);
- }
-
-}
-
-static void __init set_vsmp_pv_ops(void)
+#ifdef CONFIG_PCI
+static void __init set_vsmp_ctl(void)
{
void __iomem *address;
unsigned int cap, ctl, cfg;
}
#endif
- if (cap & ctl & (1 << 4)) {
- /* Setup irq ops and turn on vSMP IRQ fastpath handling */
- pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable);
- pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable);
- pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl);
- pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl);
- pv_ops.init.patch = vsmp_patch;
- ctl &= ~(1 << 4);
- }
writel(ctl, address + 4);
ctl = readl(address + 4);
pr_info("vSMP CTL: control set to:0x%08x\n", ctl);
early_iounmap(address, 8);
}
-#else
-static void __init set_vsmp_pv_ops(void)
-{
-}
-#endif
-
-#ifdef CONFIG_PCI
static int is_vsmp = -1;
static void __init detect_vsmp_box(void)
{
return 0;
}
+static void __init set_vsmp_ctl(void)
+{
+}
#endif
static void __init vsmp_cap_cpus(void)
{
-#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP)
+#if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
void __iomem *address;
unsigned int cfg, topology, node_shift, maxcpus;
vsmp_cap_cpus();
- set_vsmp_pv_ops();
+ set_vsmp_ctl();
return;
}
init_top_pgt[0] = __pgd(0);
/* Pre-constructed entries are in pfn, so convert to mfn */
- /* L4[272] -> level3_ident_pgt */
+ /* L4[273] -> level3_ident_pgt */
/* L4[511] -> level3_kernel_pgt */
convert_pfn_mfn(init_top_pgt);
addr[0] = (unsigned long)pgd;
addr[1] = (unsigned long)l3;
addr[2] = (unsigned long)l2;
- /* Graft it onto L4[272][0]. Note that we creating an aliasing problem:
- * Both L4[272][0] and L4[511][510] have entries that point to the same
+	/* Graft it onto L4[273][0]. Note that we are creating an aliasing problem:
+ * Both L4[273][0] and L4[511][510] have entries that point to the same
* L2 (PMD) tables. Meaning that if you modify it in __va space
* it will be also modified in the __ka space! (But if you just
* modify the PMD table to point to other PTE's or none, then you
/*
* The interface requires atomic updates on p2m elements.
- * xen_safe_write_ulong() is using __put_user which does an atomic
- * store via asm().
+ * xen_safe_write_ulong() is using an atomic store via asm().
*/
if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn)))
return true;
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/slab.h>
+#include <linux/atomic.h>
#include <asm/paravirt.h>
#include <asm/qspinlock.h>
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
*/
static void xen_qlock_wait(u8 *byte, u8 val)
{
- unsigned long flags;
int irq = __this_cpu_read(lock_kicker_irq);
+ atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
/* If kicker interrupts not initialized yet, just spin */
if (irq == -1 || in_nmi())
return;
- /* Guard against reentry. */
- local_irq_save(flags);
+ /* Detect reentry. */
+ atomic_inc(nest_cnt);
- /* If irq pending already clear it. */
- if (xen_test_irq_pending(irq)) {
+	/* If irq is already pending and this is not a nested call, clear it. */
+ if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
xen_clear_irq_pending(irq);
} else if (READ_ONCE(*byte) == val) {
/* Block until irq becomes pending (or a spurious wakeup) */
xen_poll_irq(irq);
}
- local_irq_restore(flags);
+ atomic_dec(nest_cnt);
}
static irqreturn_t dummy_handler(int irq, void *dev_id)
if (ret)
goto cleanup;
} else {
+ zero_fill_bio(bio);
iov_iter_advance(iter, bio->bi_iter.bi_size);
}
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- while (nr_sects) {
- unsigned int req_sects = nr_sects;
- sector_t end_sect;
-
- if (!req_sects)
- goto fail;
- if (req_sects > UINT_MAX >> 9)
- req_sects = UINT_MAX >> 9;
+ if (!nr_sects)
+ return -EINVAL;
- end_sect = sector + req_sects;
+ while (nr_sects) {
+ unsigned int req_sects = min_t(unsigned int, nr_sects,
+ bio_allowed_max_sectors(q));
bio = blk_next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector;
bio_set_op_attrs(bio, op, 0);
bio->bi_iter.bi_size = req_sects << 9;
+ sector += req_sects;
nr_sects -= req_sects;
- sector = end_sect;
/*
* We can loop for a long time in here, if someone does
*biop = bio;
return 0;
-
-fail:
- if (bio) {
- submit_bio_wait(bio);
- bio_put(bio);
- }
- *biop = NULL;
- return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);
return -EOPNOTSUPP;
/* Ensure that max_write_same_sectors doesn't overflow bi_size */
- max_write_same_sectors = UINT_MAX >> 9;
+ max_write_same_sectors = bio_allowed_max_sectors(q);
while (nr_sects) {
bio = blk_next_bio(bio, 1, gfp_mask);
bio_get_first_bvec(prev_rq->bio, &pb);
else
bio_get_first_bvec(prev, &pb);
- if (pb.bv_offset)
+ if (pb.bv_offset & queue_virt_boundary(q))
return true;
/*
/* Zero-sector (unknown) and one-sector granularities are the same. */
granularity = max(q->limits.discard_granularity >> 9, 1U);
- max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+ max_discard_sectors = min(q->limits.max_discard_sectors,
+ bio_allowed_max_sectors(q));
max_discard_sectors -= max_discard_sectors % granularity;
if (unlikely(!max_discard_sectors)) {
static inline bool __bvec_gap_to_prev(struct request_queue *q,
struct bio_vec *bprv, unsigned int offset)
{
- return offset ||
+ return (offset & queue_virt_boundary(q)) ||
((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}
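Worked example for the relaxed check: with queue_virt_boundary(q) == 4095 (a
4 KiB boundary, chosen for illustration), a bvec offset of 8192 gives
(8192 & 4095) == 0, so no gap is reported; the previous code treated any
nonzero offset as a gap and split such boundary-aligned requests unnecessarily.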
return rq->__deadline & ~0x1UL;
}
+/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int', and it has to be aligned to the logical
+ * block size, which is the minimum unit accepted by the hardware.
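+ *
+ * Illustrative arithmetic (assuming 4096-byte logical blocks):
+ * round_down(4294967295, 4096) = 4294963200 bytes = 8388600 sectors.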
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+ return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
/*
* Internal io_context interface
*/
struct acpi_nfit_desc *acpi_desc;
struct nfit_spa *nfit_spa;
- /* We only care about memory errors */
- if (!mce_is_memory_error(mce))
+ /* We only care about uncorrectable memory errors */
+ if (!mce_is_memory_error(mce) || mce_is_correctable(mce))
+ return NOTIFY_DONE;
+
+ /* Verify the address reported in the MCE is valid. */
+ if (!mce_usable_address(mce))
return NOTIFY_DONE;
/*
+// SPDX-License-Identifier: GPL-2.0+
/*
* Renesas R-Car SATA driver
*
* Author: Vladimir Barinov <source@cogentembedded.com>
* Copyright (C) 2013-2015 Cogent Embedded, Inc.
* Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
*/
#include <linux/kernel.h>
GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+ info->nr_rings = 0;
return -ENOMEM;
}
{
struct clk *clk = platform_get_drvdata(pdev);
+ of_clk_del_provider(pdev->dev.of_node);
clk_unregister_fixed_factor(clk);
return 0;
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
},
};
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+		 * This clock, like fdiv2, is used by the SCPI FW and is required
+		 * by the platform to operate correctly.
+		 * Until the following conditions are met, we need this clock to
+		 * be marked as critical:
+		 * a) The SCPI generic driver claims and enables all the clocks
+		 *    it needs
+		 * b) CCF has a clock hand-off mechanism to make sure the
+		 *    clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div3_div" },
.num_parents = 1,
+ /*
+ * FIXME:
+		 * This clock, like fdiv2, is used by the SCPI FW and is required
+		 * by the platform to operate correctly.
+		 * Until the following conditions are met, we need this clock to
+		 * be marked as critical:
+		 * a) The SCPI generic driver claims and enables all the clocks
+		 *    it needs
+		 * b) CCF has a clock hand-off mechanism to make sure the
+		 *    clock stays on until the proper driver comes along
+ */
+ .flags = CLK_IS_CRITICAL,
},
};
.div = 1,
.hw.init = &(struct clk_init_data){
.name = "cxo",
- .parent_names = (const char *[]){ "xo_board" },
+ .parent_names = (const char *[]){ "xo-board" },
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
},
DEFINE_RAW_SPINLOCK(i8253_lock);
EXPORT_SYMBOL(i8253_lock);
+/*
+ * Handle PIT quirk in pit_shutdown() where zeroing the counter register
+ * restarts the PIT, negating the shutdown. On platforms with the quirk,
+ * platform specific code can set this to false.
+ */
+bool i8253_clear_counter_on_shutdown __ro_after_init = true;
+
#ifdef CONFIG_CLKSRC_I8253
/*
* Since the PIT overflows every tick, its not very useful
raw_spin_lock(&i8253_lock);
outb_p(0x30, PIT_MODE);
- outb_p(0, PIT_CH0);
- outb_p(0, PIT_CH0);
+
+ if (i8253_clear_counter_on_shutdown) {
+ outb_p(0, PIT_CH0);
+ outb_p(0, PIT_CH0);
+ }
raw_spin_unlock(&i8253_lock);
return 0;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;
+extern uint amdgpu_dc_feature_mask;
extern struct amdgpu_mgpu_info mgpu_info;
#ifdef CONFIG_DRM_AMDGPU_SI
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode = 0;
uint amdgpu_smu_memory_pool_size = 0;
+/* FBC (bit 0) disabled by default */
+uint amdgpu_dc_feature_mask = 0;
+
struct amdgpu_mgpu_info mgpu_info = {
.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
};
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
#endif
+/**
+ * DOC: dcfeaturemask (uint)
+ * Override the enabled display features. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
+ * The default is the current set of stable display features.
+ */
+MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default)");
+module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
+
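For instance (illustrative value), booting with amdgpu.dcfeaturemask=0x1 on the
kernel command line sets bit 0, which the DC_FBC_MASK test added in amdgpu_dm
below turns into init_data.flags.fbc_support = true.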
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
+ adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
}
return 0;
}
adev->asic_type < CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
+ if (amdgpu_dc_feature_mask & DC_FBC_MASK)
+ init_data.flags.fbc_support = true;
+
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
- /*
- * PWM interperts 0 as 100% rather than 0% because of HW
- * limitation for level 0.So limiting minimum brightness level
- * to 1.
- */
- if (bd->props.brightness < 1)
- return 1;
if (dc_link_set_backlight_level(dm->backlight_link,
bd->props.brightness, 0, 0))
return 0;
drm_connector = &aconnector->base;
if (!aconnector->dc_sink) {
- /*
- * Create dc_sink when necessary to MST
- * Don't apply fake_sink to MST
- */
- if (aconnector->mst_port) {
- dm_dp_mst_dc_sink_create(drm_connector);
- return stream;
+ if (!aconnector->mst_port) {
+ sink = create_fake_sink(aconnector);
+ if (!sink)
+ return stream;
}
-
- sink = create_fake_sink(aconnector);
- if (!sink)
- return stream;
} else {
sink = aconnector->dc_sink;
}
static const struct drm_plane_funcs dm_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
+ .destroy = drm_primary_helper_destroy,
.reset = dm_drm_plane_reset,
.atomic_duplicate_state = dm_drm_plane_duplicate_state,
.atomic_destroy_state = dm_drm_plane_destroy_state,
struct mutex hpd_lock;
bool fake_enable;
-
- bool mst_connected;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
.atomic_get_property = amdgpu_dm_connector_atomic_get_property
};
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
-{
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
- struct dc_sink *dc_sink;
- struct dc_sink_init_data init_params = {
- .link = aconnector->dc_link,
- .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
-
- /* FIXME none of this is safe. we shouldn't touch aconnector here in
- * atomic_check
- */
-
- /*
- * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
- */
- if (!aconnector->port || !aconnector->port->aux.ddc.algo)
- return;
-
- ASSERT(aconnector->edid);
-
- dc_sink = dc_link_add_remote_sink(
- aconnector->dc_link,
- (uint8_t *)aconnector->edid,
- (aconnector->edid->extensions + 1) * EDID_LENGTH,
- &init_params);
-
- dc_sink->priv = aconnector;
- aconnector->dc_sink = dc_sink;
-
- if (aconnector->dc_sink)
- amdgpu_dm_update_freesync_caps(
- connector, aconnector->edid);
-}
-
static int dm_dp_mst_get_modes(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_encoder *amdgpu_encoder;
struct drm_encoder *encoder;
- const struct drm_connector_helper_funcs *connector_funcs =
- connector->base.helper_private;
- struct drm_encoder *enc_master =
- connector_funcs->best_encoder(&connector->base);
- DRM_DEBUG_KMS("enc master is %p\n", enc_master);
amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
if (!amdgpu_encoder)
return NULL;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->mst_port == master
- && !aconnector->port) {
- DRM_INFO("DM_MST: reusing connector: %p [id: %d] [master: %p]\n",
- aconnector, connector->base.id, aconnector->mst_port);
-
- aconnector->port = port;
- drm_connector_set_path_property(connector, pathprop);
-
- drm_connector_list_iter_end(&conn_iter);
- aconnector->mst_connected = true;
- return &aconnector->base;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
*/
amdgpu_dm_connector_funcs_reset(connector);
- aconnector->mst_connected = true;
-
DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
aconnector, connector->base.id, aconnector->mst_port);
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
+ struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+ struct drm_device *dev = master->base.dev;
+ struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
aconnector->dc_sink = NULL;
}
- aconnector->mst_connected = false;
+ drm_connector_unregister(connector);
+ if (adev->mode_info.rfbdev)
+ drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
+ drm_connector_put(connector);
}
static void dm_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
drm_kms_helper_hotplug_event(dev);
}
-static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
-{
- mutex_lock(&connector->dev->mode_config.mutex);
- drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
- mutex_unlock(&connector->dev->mode_config.mutex);
-}
-
static void dm_dp_mst_register_connector(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
if (adev->mode_info.rfbdev)
drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
drm_connector_register(connector);
-
- if (aconnector->mst_connected)
- dm_dp_mst_link_status_reset(connector);
}
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector);
-void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
#endif
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
i2c_success = i2c_write(pipe_ctx, slave_address,
buffer, sizeof(buffer));
RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\
- offset = 0x%d, reg_val = 0x%d, i2c_success = %d\n",
+ offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
/* Write failure */
struct dc_config {
bool gpu_vm_support;
bool disable_disp_pll_sharing;
+ bool fbc_support;
};
enum visual_confirm {
if (events->force_trigger)
value |= 0x1;
- value |= 0x84;
+ if (num_pipes) {
+ struct dc *dc = pipe_ctx[0]->stream->ctx->dc;
+
+ if (dc->fbc_compressor)
+ value |= 0x84;
+ }
for (i = 0; i < num_pipes; i++)
pipe_ctx[i]->stream_res.tg->funcs->
pool->base.sw_i2cs[i] = NULL;
}
- dc->fbc_compressor = dce110_compressor_create(ctx);
+ if (dc->config.fbc_support)
+ dc->fbc_compressor = dce110_compressor_create(ctx);
if (!underlay_create(ctx, &pool->base))
goto res_create_fail;
PP_AVFS_MASK = 0x40000,
};
+enum DC_FEATURE_MASK {
+ DC_FBC_MASK = 0x1,
+};
+
/**
* struct amd_ip_funcs - general hooks for managing amdgpu IP Blocks
*/
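A minimal sketch of how the new DC_FBC_MASK bit is consumed, assuming (as elsewhere in this series) that the mask arrives through an amdgpu module option; the parameter and helper names here are illustrative only:

	/* a module-option bitmask gating optional DC features */
	static uint dc_feature_mask;	/* e.g. loaded with dc_feature_mask=0x1 */
	module_param(dc_feature_mask, uint, 0444);

	static bool dc_fbc_requested(void)
	{
		return dc_feature_mask & DC_FBC_MASK;	/* bit 0, per the enum above */
	}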
struct atom_common_table_header table_header;
uint8_t smuip_min_ver;
uint8_t smuip_max_ver;
- uint8_t smu_rsd1;
+ uint8_t waflclk_ss_mode;
uint8_t gpuclk_ss_mode;
uint16_t sclk_ss_percentage;
uint16_t sclk_ss_rate_10hz;
uint32_t syspll3_1_vco_freq_10khz;
uint32_t bootup_fclk_10khz;
uint32_t bootup_waflclk_10khz;
- uint32_t reserved[3];
+ uint32_t smu_info_caps;
+ uint16_t waflclk_ss_percentage; // in unit of 0.001%
+ uint16_t smuinitoffset;
+ uint32_t reserved;
};
/*
data->registry_data.disable_auto_wattman = 1;
data->registry_data.auto_wattman_debug = 0;
data->registry_data.auto_wattman_sample_period = 100;
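+	/* 0x3F6CCCCD is the IEEE-754 single-precision bit pattern for 0.925f */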
+ data->registry_data.fclk_gfxclk_ratio = 0x3F6CCCCD;
data->registry_data.auto_wattman_threshold = 50;
data->registry_data.gfxoff_controlled_by_driver = 1;
data->gfxoff_allowed = false;
return 0;
}
+static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
+
+ if (data->smu_features[GNLD_DPM_UCLK].enabled)
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetUclkFastSwitch,
+ 1);
+
+ return 0;
+}
+
+static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
+{
+ struct vega20_hwmgr *data =
+ (struct vega20_hwmgr *)(hwmgr->backend);
+
+ return smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetFclkGfxClkRatio,
+ data->registry_data.fclk_gfxclk_ratio);
+}
+
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
struct vega20_hwmgr *data =
"[EnableDPMTasks] Failed to enable all smu features!",
return result);
+ result = vega20_notify_smc_display_change(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to notify smc display change!",
+ return result);
+
+ result = vega20_send_clock_ratio(hwmgr);
+ PP_ASSERT_WITH_CODE(!result,
+ "[EnableDPMTasks] Failed to send clock ratio!",
+ return result);
+
/* Initialize UVD/VCE powergating state */
vega20_init_powergate_state(hwmgr);
return ret;
}
-static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr,
- bool has_disp)
-{
- struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
-
- if (data->smu_features[GNLD_DPM_UCLK].enabled)
- return smum_send_msg_to_smc_with_parameter(hwmgr,
- PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 1 : 0);
-
- return 0;
-}
-
int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
struct pp_display_clock_request *clock_req)
{
struct pp_display_clock_request clock_req;
int ret = 0;
- if ((hwmgr->display_config->num_display > 1) &&
- !hwmgr->display_config->multi_monitor_in_sync &&
- !hwmgr->display_config->nb_pstate_switch_disable)
- vega20_notify_smc_display_change(hwmgr, false);
- else
- vega20_notify_smc_display_change(hwmgr, true);
-
min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;
uint8_t disable_auto_wattman;
uint32_t auto_wattman_debug;
uint32_t auto_wattman_sample_period;
+ uint32_t fclk_gfxclk_ratio;
uint8_t auto_wattman_threshold;
uint8_t log_avfs_param;
uint8_t enable_enginess;
#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B
#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C
#define PPSMC_MSG_WaflTest 0x4D
-// Unused ID 0x4E to 0x50
+#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E
+// Unused ID 0x4F to 0x50
#define PPSMC_MSG_AllowGfxOff 0x51
#define PPSMC_MSG_DisallowGfxOff 0x52
#define PPSMC_MSG_GetPptLimit 0x53
 * If the GPU managed to complete this job's fence, the timeout is
* spurious. Bail out.
*/
- if (fence_completed(gpu, submit->out_fence->seqno))
+ if (dma_fence_is_signaled(submit->out_fence))
return;
/*
return frm;
}
-static u32 decon_get_vblank_counter(struct exynos_drm_crtc *crtc)
-{
- struct decon_context *ctx = crtc->ctx;
-
- return decon_get_frame_count(ctx, false);
-}
-
static void decon_setup_trigger(struct decon_context *ctx)
{
if (!ctx->crtc->i80_mode && !(ctx->out_type & I80_HW_TRG))
.disable = decon_disable,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
- .get_vblank_counter = decon_get_vblank_counter,
.atomic_begin = decon_atomic_begin,
.update_plane = decon_update_plane,
.disable_plane = decon_disable_plane,
int ret;
ctx->drm_dev = drm_dev;
- drm_dev->max_vblank_count = 0xffffffff;
for (win = ctx->first_win; win < WINDOWS_NR; win++) {
ctx->configs[win].pixel_formats = decon_formats;
exynos_crtc->ops->disable_vblank(exynos_crtc);
}
-static u32 exynos_drm_crtc_get_vblank_counter(struct drm_crtc *crtc)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->get_vblank_counter)
- return exynos_crtc->ops->get_vblank_counter(exynos_crtc);
-
- return 0;
-}
-
static const struct drm_crtc_funcs exynos_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
- .get_vblank_counter = exynos_drm_crtc_get_vblank_counter,
};
struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
void (*disable)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
- u32 (*get_vblank_counter)(struct exynos_drm_crtc *crtc);
enum drm_mode_status (*mode_valid)(struct exynos_drm_crtc *crtc,
const struct drm_display_mode *mode);
bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
#include <drm/drm_atomic_helper.h>
{
struct exynos_dsi *dsi = encoder_to_dsi(encoder);
struct drm_connector *connector = &dsi->connector;
+ struct drm_device *drm = encoder->dev;
int ret;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- ret = drm_connector_init(encoder->dev, connector,
- &exynos_dsi_connector_funcs,
+ ret = drm_connector_init(drm, connector, &exynos_dsi_connector_funcs,
DRM_MODE_CONNECTOR_DSI);
if (ret) {
DRM_ERROR("Failed to initialize connector with drm\n");
connector->status = connector_status_disconnected;
drm_connector_helper_add(connector, &exynos_dsi_connector_helper_funcs);
drm_connector_attach_encoder(connector, encoder);
+ if (!drm->registered)
+ return 0;
+ connector->funcs->reset(connector);
+ drm_fb_helper_add_one_connector(drm->fb_helper, connector);
+ drm_connector_register(connector);
return 0;
}
}
dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (dsi->panel) {
+ if (IS_ERR(dsi->panel)) {
+ dsi->panel = NULL;
+ } else {
drm_panel_attach(dsi->panel, &dsi->connector);
dsi->connector.status = connector_status_connected;
}
struct drm_fb_helper *helper;
int ret;
- if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+ if (!dev->mode_config.num_crtc)
return 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
- mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
- mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
struct intel_gvt_gtt_entry e, m;
dma_addr_t dma_addr;
int ret;
+ struct intel_gvt_partial_pte *partial_pte, *pos, *n;
+ bool partial_update = false;
if (bytes != 4 && bytes != 8)
return -EINVAL;
if (!vgpu_gmadr_is_valid(vgpu, gma))
return 0;
- ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);
-
+ e.type = GTT_TYPE_GGTT_PTE;
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
/* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
- * write, we assume the two 4 bytes writes are consecutive.
- * Otherwise, we abort and report error
+	 * write, save the first 4 bytes in a list and update the virtual
+	 * PTE; only update the shadow PTE when the second 4-byte write arrives.
*/
if (bytes < info->gtt_entry_size) {
- if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
- /* the first partial part*/
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
- return 0;
- } else if ((g_gtt_index ==
- (ggtt_mm->ggtt_mm.last_partial_off >>
- info->gtt_entry_size_shift)) &&
- (off != ggtt_mm->ggtt_mm.last_partial_off)) {
- /* the second partial part */
-
- int last_off = ggtt_mm->ggtt_mm.last_partial_off &
- (info->gtt_entry_size - 1);
-
- memcpy((void *)&e.val64 + last_off,
- (void *)&ggtt_mm->ggtt_mm.last_partial_data +
- last_off, bytes);
-
- ggtt_mm->ggtt_mm.last_partial_off = -1UL;
- } else {
- int last_offset;
-
- gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
- ggtt_mm->ggtt_mm.last_partial_off, off,
- bytes, info->gtt_entry_size);
-
- /* set host ggtt entry to scratch page and clear
- * virtual ggtt entry as not present for last
- * partially write offset
- */
- last_offset = ggtt_mm->ggtt_mm.last_partial_off &
- (~(info->gtt_entry_size - 1));
-
- ggtt_get_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate_pte(vgpu, &m);
- ops->set_pfn(&m, gvt->gtt.scratch_mfn);
- ops->clear_present(&m);
- ggtt_set_host_entry(ggtt_mm, &m, last_offset);
- ggtt_invalidate(gvt->dev_priv);
-
- ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
- ops->clear_present(&e);
- ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
-
- ggtt_mm->ggtt_mm.last_partial_off = off;
- ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ bool found = false;
+
+ list_for_each_entry_safe(pos, n,
+ &ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ if (g_gtt_index == pos->offset >>
+ info->gtt_entry_size_shift) {
+ if (off != pos->offset) {
+					/* the second partial part */
+ int last_off = pos->offset &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&pos->data + last_off,
+ bytes);
+
+ list_del(&pos->list);
+ kfree(pos);
+ found = true;
+ break;
+ }
+
+ /* update of the first partial part */
+ pos->data = e.val64;
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+ return 0;
+ }
+ }
- return 0;
+ if (!found) {
+ /* the first partial part */
+ partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
+ if (!partial_pte)
+ return -ENOMEM;
+ partial_pte->offset = off;
+ partial_pte->data = e.val64;
+ list_add_tail(&partial_pte->list,
+ &ggtt_mm->ggtt_mm.partial_pte_list);
+ partial_update = true;
}
}
- if (ops->test_present(&e)) {
+ if (!partial_update && (ops->test_present(&e))) {
gfn = ops->get_pfn(&e);
m = e;
} else
ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
} else {
- ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate_pte(vgpu, &m);
ops->set_pfn(&m, gvt->gtt.scratch_mfn);
ops->clear_present(&m);
}
out:
+ ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
+
+ ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
+ ggtt_invalidate_pte(vgpu, &e);
+
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
ggtt_invalidate(gvt->dev_priv);
- ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
return 0;
}
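The two halves of an 8-byte PTE can now arrive in either order; the merge above is just a byte splice. A standalone sketch of it, mirroring the memcpy in the hunk (the function name is hypothetical):

	#include <stdint.h>
	#include <string.h>

	/* fold a newly written 4-byte half into the parked 64-bit entry */
	static uint64_t merge_pte_halves(uint64_t parked, uint32_t half,
					 unsigned int byte_off /* 0 or 4 */)
	{
		uint64_t e = parked;

		memcpy((unsigned char *)&e + byte_off, &half, sizeof(half));
		return e;
	}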
intel_vgpu_reset_ggtt(vgpu, false);
+	INIT_LIST_HEAD(&gtt->ggtt_mm->ggtt_mm.partial_pte_list);
+
return create_scratch_page_tree(vgpu);
}
static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
{
+ struct intel_gvt_partial_pte *pos;
+
+ list_for_each_entry(pos,
+ &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list, list) {
+ gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
+ pos->offset, pos->data);
+ kfree(pos);
+ }
intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
vgpu->gtt.ggtt_mm = NULL;
}
#define _GVT_GTT_H_
#define I915_GTT_PAGE_SHIFT 12
-#define I915_GTT_PAGE_MASK (~(I915_GTT_PAGE_SIZE - 1))
struct intel_vgpu_mm;
#define GVT_RING_CTX_NR_PDPS GEN8_3LVL_PDPES
+struct intel_gvt_partial_pte {
+ unsigned long offset;
+ u64 data;
+ struct list_head list;
+};
+
struct intel_vgpu_mm {
enum intel_gvt_mm_type type;
struct intel_vgpu *vgpu;
} ppgtt_mm;
struct {
void *virtual_ggtt;
- unsigned long last_partial_off;
- u64 last_partial_data;
+ struct list_head partial_pte_list;
} ggtt_mm;
};
};
return 0;
}
-static int bxt_edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
+static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
vgpu_vreg(vgpu, offset) = 0;
MMIO_DFH(_MMIO(0x1a178), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x1a17c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(_MMIO(0x2217c), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+
+ MMIO_DH(EDP_PSR_IMR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
+ MMIO_DH(EDP_PSR_IIR, D_BDW_PLUS, NULL, edp_psr_imr_iir_write);
return 0;
}
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_B), D_BXT);
MMIO_D(HSW_TVIDEO_DIP_GCP(TRANSCODER_C), D_BXT);
- MMIO_DH(EDP_PSR_IMR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
- MMIO_DH(EDP_PSR_IIR, D_BXT, NULL, bxt_edp_psr_imr_iir_write);
-
MMIO_D(RC6_CTX_BASE, D_BXT);
MMIO_D(GEN8_PUSHBUS_CONTROL, D_BXT);
{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */
{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
- {RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */
+ {RCS, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
return -EINVAL;
}
- dram_info->valid_dimm = true;
-
/*
* If any of the channel is single rank channel, worst case output
* will be same as if single rank memory, so consider single rank
return -EINVAL;
}
- if (ch0.is_16gb_dimm || ch1.is_16gb_dimm)
- dram_info->is_16gb_dimm = true;
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
dev_priv->dram_info.symmetric_memory = intel_is_dram_symmetric(val_ch0,
val_ch1,
return -EINVAL;
}
- dram_info->valid_dimm = true;
dram_info->valid = true;
return 0;
}
int ret;
dram_info->valid = false;
- dram_info->valid_dimm = false;
- dram_info->is_16gb_dimm = false;
dram_info->rank = I915_DRAM_RANK_INVALID;
dram_info->bandwidth_kbps = 0;
dram_info->num_channels = 0;
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
+
if (INTEL_GEN(dev_priv) < 9 || IS_GEMINILAKE(dev_priv))
return;
struct dram_info {
bool valid;
- bool valid_dimm;
bool is_16gb_dimm;
u8 num_channels;
enum dram_rank {
* any non-page-aligned or non-canonical addresses.
*/
if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
- entry->offset != gen8_canonical_addr(entry->offset & PAGE_MASK)))
+ entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
return -EINVAL;
/* pad_to_size was once a reserved field, so sanitize it */
if (i == 4)
continue;
- seq_printf(m, "\t\t(%03d, %04d) %08lx: ",
+ seq_printf(m, "\t\t(%03d, %04d) %08llx: ",
pde, pte,
(pde * GEN6_PTES + pte) * I915_GTT_PAGE_SIZE);
for (i = 0; i < 4; i++) {
#include "i915_selftest.h"
#include "i915_timeline.h"
-#define I915_GTT_PAGE_SIZE_4K BIT(12)
-#define I915_GTT_PAGE_SIZE_64K BIT(16)
-#define I915_GTT_PAGE_SIZE_2M BIT(21)
+#define I915_GTT_PAGE_SIZE_4K BIT_ULL(12)
+#define I915_GTT_PAGE_SIZE_64K BIT_ULL(16)
+#define I915_GTT_PAGE_SIZE_2M BIT_ULL(21)
#define I915_GTT_PAGE_SIZE I915_GTT_PAGE_SIZE_4K
#define I915_GTT_MAX_PAGE_SIZE I915_GTT_PAGE_SIZE_2M
+#define I915_GTT_PAGE_MASK -I915_GTT_PAGE_SIZE
+
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE
#define I915_FENCE_REG_NONE -1
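Two notes on the hunk above: BIT() expands to an unsigned long, which is 32-bit on some targets, so page-size flags that take part in u64 arithmetic are widened to BIT_ULL(); and -I915_GTT_PAGE_SIZE equals ~(I915_GTT_PAGE_SIZE - 1) by the two's-complement identity. A self-contained check of that identity (plain userspace C, independent of the i915 headers):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t page = UINT64_C(1) << 12;	/* I915_GTT_PAGE_SIZE */

		/* for any power of two P, -P == ~(P - 1) */
		assert(-page == ~(page - 1));
		/* masking with -P rounds an address down to a P boundary */
		assert((UINT64_C(0xabcdef123) & -page) == UINT64_C(0xabcdef000));
		return 0;
	}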
u64 start, u64 end, unsigned int flags);
/* Flags used by pin/bind&friends. */
-#define PIN_NONBLOCK BIT(0)
-#define PIN_MAPPABLE BIT(1)
-#define PIN_ZONE_4G BIT(2)
-#define PIN_NONFAULT BIT(3)
-#define PIN_NOEVICT BIT(4)
-
-#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
-#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
-#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */
-#define PIN_UPDATE BIT(8)
-
-#define PIN_HIGH BIT(9)
-#define PIN_OFFSET_BIAS BIT(10)
-#define PIN_OFFSET_FIXED BIT(11)
+#define PIN_NONBLOCK BIT_ULL(0)
+#define PIN_MAPPABLE BIT_ULL(1)
+#define PIN_ZONE_4G BIT_ULL(2)
+#define PIN_NONFAULT BIT_ULL(3)
+#define PIN_NOEVICT BIT_ULL(4)
+
+#define PIN_MBZ BIT_ULL(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL BIT_ULL(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER BIT_ULL(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE BIT_ULL(8)
+
+#define PIN_HIGH BIT_ULL(9)
+#define PIN_OFFSET_BIAS BIT_ULL(10)
+#define PIN_OFFSET_FIXED BIT_ULL(11)
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)
#endif
/* ICL PHY DFLEX registers */
#define PORT_TX_DFLEXDPMLE1 _MMIO(0x1638C0)
-#define DFLEXDPMLE1_DPMLETC_MASK(n) (0xf << (4 * (n)))
-#define DFLEXDPMLE1_DPMLETC(n, x) ((x) << (4 * (n)))
+#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port)))
+#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port)))
/* BXT PHY Ref registers */
#define _PORT_REF_DW3_A 0x16218C
#define DRM_DIP_ENABLE (1 << 28)
#define PSR_VSC_BIT_7_SET (1 << 27)
-#define VSC_SELECT_MASK (0x3 << 26)
-#define VSC_SELECT_SHIFT 26
-#define VSC_DIP_HW_HEA_DATA (0 << 26)
-#define VSC_DIP_HW_HEA_SW_DATA (1 << 26)
-#define VSC_DIP_HW_DATA_SW_HEA (2 << 26)
-#define VSC_DIP_SW_HEA_DATA (3 << 26)
+#define VSC_SELECT_MASK (0x3 << 25)
+#define VSC_SELECT_SHIFT 25
+#define VSC_DIP_HW_HEA_DATA (0 << 25)
+#define VSC_DIP_HW_HEA_SW_DATA (1 << 25)
+#define VSC_DIP_HW_DATA_SW_HEA (2 << 25)
+#define VSC_DIP_SW_HEA_DATA (3 << 25)
#define VDIP_ENABLE_PPS (1 << 24)
/* Panel power sequencing */
/* HDMI N/CTS table */
#define TMDS_297M 297000
#define TMDS_296M 296703
+#define TMDS_594M 594000
+#define TMDS_593M 593407
+
static const struct {
int sample_rate;
int clock;
{ 176400, TMDS_297M, 18816, 247500 },
{ 192000, TMDS_296M, 23296, 281250 },
{ 192000, TMDS_297M, 20480, 247500 },
+ { 44100, TMDS_593M, 8918, 937500 },
+ { 44100, TMDS_594M, 9408, 990000 },
+ { 48000, TMDS_593M, 5824, 562500 },
+ { 48000, TMDS_594M, 6144, 594000 },
+ { 32000, TMDS_593M, 5824, 843750 },
+ { 32000, TMDS_594M, 3072, 445500 },
+ { 88200, TMDS_593M, 17836, 937500 },
+ { 88200, TMDS_594M, 18816, 990000 },
+ { 96000, TMDS_593M, 11648, 562500 },
+ { 96000, TMDS_594M, 12288, 594000 },
+ { 176400, TMDS_593M, 35672, 937500 },
+ { 176400, TMDS_594M, 37632, 990000 },
+ { 192000, TMDS_593M, 23296, 562500 },
+ { 192000, TMDS_594M, 24576, 594000 },
};
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv,
int pixel_rate)
{
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return DIV_ROUND_UP(pixel_rate, 2);
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Avoid using a pixel clock that is more than 99% of the cdclk
- * as a temporary workaround. Use a higher cdclk instead. (Note that
- * intel_compute_max_dotclk() limits the max pixel clock to 99% of max
- * cdclk.)
- */
- return DIV_ROUND_UP(pixel_rate * 100, 2 * 99);
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return pixel_rate;
{
int max_cdclk_freq = dev_priv->max_cdclk_freq;
- if (INTEL_GEN(dev_priv) >= 10)
+ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
return 2 * max_cdclk_freq;
- else if (IS_GEMINILAKE(dev_priv))
- /*
- * FIXME: Limiting to 99% as a temporary workaround. See
- * intel_min_cdclk() for details.
- */
- return 2 * max_cdclk_freq * 99 / 100;
else if (IS_GEN9(dev_priv) ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
return max_cdclk_freq;
intel_check_cpu_fifo_underruns(dev_priv);
intel_check_pch_fifo_underruns(dev_priv);
- if (!new_crtc_state->active) {
- /*
- * Make sure we don't call initial_watermarks
- * for ILK-style watermark updates.
- *
- * No clue what this is supposed to achieve.
- */
- if (INTEL_GEN(dev_priv) >= 9)
- dev_priv->display.initial_watermarks(intel_state,
- to_intel_crtc_state(new_crtc_state));
- }
+ /* FIXME unify this for all platforms */
+ if (!new_crtc_state->active &&
+ !HAS_GMCH_DISPLAY(dev_priv) &&
+ dev_priv->display.initial_watermarks)
+ dev_priv->display.initial_watermarks(intel_state,
+ to_intel_crtc_state(new_crtc_state));
}
}
fb->height < SKL_MIN_YUV_420_SRC_H ||
(fb->width % 4) != 0 || (fb->height % 4) != 0)) {
DRM_DEBUG_KMS("src dimensions not correct for NV12\n");
- return -EINVAL;
+ goto err;
}
for (i = 0; i < fb->format->num_planes; i++) {
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
-}
+ dev_priv->lpe_audio.irq = -1;
+ dev_priv->lpe_audio.platdev = NULL;
+}
/**
* intel_lpe_audio_notify() - notify lpe audio event
 * any underrun. If unable to get DIMM info, assume a 16GB DIMM
* to avoid any underrun.
*/
- if (!dev_priv->dram_info.valid_dimm ||
- dev_priv->dram_info.is_16gb_dimm)
+ if (dev_priv->dram_info.is_16gb_dimm)
wm[0] += 1;
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
err = igt_check_page_sizes(vma);
if (vma->page_sizes.gtt != I915_GTT_PAGE_SIZE_4K) {
- pr_err("page_sizes.gtt=%u, expected %lu\n",
+ pr_err("page_sizes.gtt=%u, expected %llu\n",
vma->page_sizes.gtt, I915_GTT_PAGE_SIZE_4K);
err = -EINVAL;
}
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != total ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
total, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
if (vma->node.start != offset ||
vma->node.size != 2*I915_GTT_PAGE_SIZE) {
- pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
+ pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %llx)\n",
vma->node.start, vma->node.size,
offset, 2*I915_GTT_PAGE_SIZE);
err = -EINVAL;
DRM_DEBUG_DRIVER("Enabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
DRM_DEBUG_DRIVER("Disabling LVDS output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
DRM_DEBUG_DRIVER("Enabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_prepare(tcon->panel);
drm_panel_enable(tcon->panel);
}
DRM_DEBUG_DRIVER("Disabling RGB output\n");
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
drm_panel_disable(tcon->panel);
drm_panel_unprepare(tcon->panel);
}
sun4i_tcon0_mode_set_common(tcon, mode);
/* Set dithering if needed */
- sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
+ if (tcon->panel)
+ sun4i_tcon0_mode_set_dithering(tcon, tcon->panel->connector);
/* Adjust clock delay */
clk_delay = sun4i_tcon_get_clk_delay(mode, 0);
* Following code is a way to avoid quirks all around TCON
* and DOTCLOCK drivers.
*/
- if (!IS_ERR(tcon->panel)) {
+ if (tcon->panel) {
struct drm_panel *panel = tcon->panel;
struct drm_connector *connector = panel->connector;
struct drm_display_info display_info = connector->display_info;
mutex_unlock(&vgasr_mutex);
return -EINVAL;
}
+	/* notify if the GPU has already been bound */
+ if (ops->gpu_bound)
+ ops->gpu_bound(pdev, id);
}
mutex_unlock(&vgasr_mutex);
return ret;
}
+static int alps_sp_open(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ return hid_hw_open(hid);
+}
+
+static void alps_sp_close(struct input_dev *dev)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+
+ hid_hw_close(hid);
+}
+
static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct alps_dev *data = hid_get_drvdata(hdev);
input2->id.version = input->id.version;
input2->dev.parent = input->dev.parent;
+ input_set_drvdata(input2, hdev);
+ input2->open = alps_sp_open;
+ input2->close = alps_sp_close;
+
__set_bit(EV_KEY, input2->evbit);
data->sp_btn_cnt = (data->sp_btn_info & 0x0F);
for (i = 0; i < data->sp_btn_cnt; i++)
u32 value;
int ret;
+ if (!IS_ENABLED(CONFIG_ASUS_WMI))
+ return false;
+
ret = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS2,
ASUS_WMI_DEVID_KBD_BACKLIGHT, 0, &value);
hid_dbg(hdev, "WMI backlight check: rc %d value %x", ret, value);
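For reference, what the IS_ENABLED() guard added above buys over an #ifdef: the code after it stays compiled and type-checked in every configuration, and is discarded as dead when CONFIG_ASUS_WMI is unset. A sketch (the function name is hypothetical):

	static bool kbd_backlight_usable(void)
	{
		if (!IS_ENABLED(CONFIG_ASUS_WMI))
			return false;	/* constant-folds; the rest is dropped */

		/* WMI query, only reachable when ASUS_WMI support is built */
		return true;
	}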
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+#define I2C_VENDOR_ID_RAYDIUM 0x2386
+#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
+
#define USB_VENDOR_ID_RAZER 0x1532
#define USB_DEVICE_ID_RAZER_BLADE_14 0x011D
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCS, USB_DEVICE_ID_MCS_GAMEPADBLOCK), HID_QUIRK_MULTI_INPUT },
- { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2), HID_QUIRK_NO_INIT_REPORTS },
#define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV BIT(0)
#define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET BIT(1)
#define I2C_HID_QUIRK_NO_RUNTIME_PM BIT(2)
+#define I2C_HID_QUIRK_DELAY_AFTER_SLEEP BIT(3)
/* flags */
#define I2C_HID_STARTED 0
bool irq_wake_enabled;
struct mutex reset_lock;
+
+ unsigned long sleep_delay;
};
static const struct i2c_hid_quirks {
{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
I2C_HID_QUIRK_NO_RUNTIME_PM },
+ { I2C_VENDOR_ID_RAYDIUM, I2C_PRODUCT_ID_RAYDIUM_4B33,
+ I2C_HID_QUIRK_DELAY_AFTER_SLEEP },
{ 0, 0 }
};
{
struct i2c_hid *ihid = i2c_get_clientdata(client);
int ret;
+ unsigned long now, delay;
i2c_hid_dbg(ihid, "%s\n", __func__);
goto set_pwr_exit;
}
+ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+ power_state == I2C_HID_PWR_ON) {
+ now = jiffies;
+ if (time_after(ihid->sleep_delay, now)) {
+ delay = jiffies_to_usecs(ihid->sleep_delay - now);
+ usleep_range(delay, delay + 1);
+ }
+ }
+
ret = __i2c_hid_command(client, &hid_set_power_cmd, power_state,
0, NULL, 0, NULL, 0);
+ if (ihid->quirks & I2C_HID_QUIRK_DELAY_AFTER_SLEEP &&
+ power_state == I2C_HID_PWR_SLEEP)
+ ihid->sleep_delay = jiffies + msecs_to_jiffies(20);
+
if (ret)
dev_err(&client->dev, "failed to change power setting.\n");
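The quirk's timing rule reduced to a standalone sketch, assuming the device needs a 20 ms gap between a SLEEP command and the next power-on (helper names are hypothetical):

	static unsigned long sleep_deadline;

	/* record the earliest time the next ON command may be issued */
	static void note_sleep(void)
	{
		sleep_deadline = jiffies + msecs_to_jiffies(20);
	}

	/* on power-on, sleep out whatever remains of the 20 ms window */
	static void wait_out_sleep_window(void)
	{
		unsigned long now = jiffies;

		if (time_after(sleep_deadline, now)) {
			unsigned long us = jiffies_to_usecs(sleep_deadline - now);

			usleep_range(us, us + 1);
		}
	}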
},
.driver_data = (void *)&sipodev_desc
},
+ {
+ .ident = "Direkt-Tek DTLAPY133-1",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "DTLAPY133-1"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
{
.ident = "Mediacom Flexbook Edge 11",
.matches = {
if (cmd == HIDIOCGCOLLECTIONINDEX) {
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->maxusage);
} else if (uref->usage_index >= field->report_count)
goto inval;
}
- if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
- (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
- uref->usage_index + uref_multi->num_values > field->report_count))
- goto inval;
+ if (cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) {
+ if (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+ uref->usage_index + uref_multi->num_values >
+ field->report_count)
+ goto inval;
+
+ uref->usage_index =
+ array_index_nospec(uref->usage_index,
+ field->report_count -
+ uref_multi->num_values);
+ }
switch (cmd) {
case HIDIOCGUSAGE:
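The Spectre-v1 pattern applied above, reduced to its essentials: a bounds check alone does not stop a mispredicted branch from issuing an out-of-bounds load, so the index is clamped after validation. A generic sketch (names are hypothetical):

	#include <linux/errno.h>
	#include <linux/nospec.h>
	#include <linux/types.h>

	static int read_entry(const u32 *array, size_t len, size_t idx, u32 *out)
	{
		if (idx >= len)
			return -EINVAL;

		/* clamp so even a speculated load cannot pass the bound */
		idx = array_index_nospec(idx, len);
		*out = array[idx];
		return 0;
	}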
if (info[i]->config[j] & HWMON_T_INPUT) {
err = hwmon_thermal_add_sensor(dev,
hwdev, j);
- if (err)
- goto free_device;
+ if (err) {
+ device_unregister(hdev);
+ goto ida_remove;
+ }
}
}
}
return hdev;
-free_device:
- device_unregister(hdev);
free_hwmon:
kfree(hwdev);
ida_remove:
return sprintf(buf, "%s\n", sdata->label);
}
-static int __init get_logical_cpu(int hwcpu)
+static int get_logical_cpu(int hwcpu)
{
int cpu;
return -ENOENT;
}
-static void __init make_sensor_label(struct device_node *np,
- struct sensor_data *sdata,
- const char *label)
+static void make_sensor_label(struct device_node *np,
+ struct sensor_data *sdata, const char *label)
{
u32 id;
size_t n;
This driver can also be built as a module. If so, the module
will be called i2c-nforce2-s4985.
+config I2C_NVIDIA_GPU
+ tristate "NVIDIA GPU I2C controller"
+ depends on PCI
+ help
+ If you say yes to this option, support will be included for the
+ NVIDIA GPU I2C controller which is used to communicate with the GPU's
+ Type-C controller. This driver can also be built as a module called
+ i2c-nvidia-gpu.
+
config I2C_SIS5595
tristate "SiS 5595"
depends on PCI
config I2C_OMAP
tristate "OMAP I2C adapter"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP || ARCH_K3
default y if MACH_OMAP_H3 || MACH_OMAP_OSK
help
If you say yes to this option, support will be included for the
obj-$(CONFIG_I2C_ISMT) += i2c-ismt.o
obj-$(CONFIG_I2C_NFORCE2) += i2c-nforce2.o
obj-$(CONFIG_I2C_NFORCE2_S4985) += i2c-nforce2-s4985.o
+obj-$(CONFIG_I2C_NVIDIA_GPU) += i2c-nvidia-gpu.o
obj-$(CONFIG_I2C_PIIX4) += i2c-piix4.o
obj-$(CONFIG_I2C_SIS5595) += i2c-sis5595.o
obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nvidia GPU I2C Controller Driver
+ *
+ * Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ */
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/unaligned.h>
+
+/* I2C definitions */
+#define I2C_MST_CNTL 0x00
+#define I2C_MST_CNTL_GEN_START BIT(0)
+#define I2C_MST_CNTL_GEN_STOP BIT(1)
+#define I2C_MST_CNTL_CMD_READ (1 << 2)
+#define I2C_MST_CNTL_CMD_WRITE (2 << 2)
+#define I2C_MST_CNTL_BURST_SIZE_SHIFT 6
+#define I2C_MST_CNTL_GEN_NACK BIT(28)
+#define I2C_MST_CNTL_STATUS GENMASK(30, 29)
+#define I2C_MST_CNTL_STATUS_OKAY (0 << 29)
+#define I2C_MST_CNTL_STATUS_NO_ACK (1 << 29)
+#define I2C_MST_CNTL_STATUS_TIMEOUT (2 << 29)
+#define I2C_MST_CNTL_STATUS_BUS_BUSY (3 << 29)
+#define I2C_MST_CNTL_CYCLE_TRIGGER BIT(31)
+
+#define I2C_MST_ADDR 0x04
+
+#define I2C_MST_I2C0_TIMING 0x08
+#define I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ 0x10e
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT 16
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX 255
+#define I2C_MST_I2C0_TIMING_TIMEOUT_CHECK BIT(24)
+
+#define I2C_MST_DATA 0x0c
+
+#define I2C_MST_HYBRID_PADCTL 0x20
+#define I2C_MST_HYBRID_PADCTL_MODE_I2C BIT(0)
+#define I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV BIT(14)
+#define I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV BIT(15)
+
+struct gpu_i2c_dev {
+ struct device *dev;
+ void __iomem *regs;
+ struct i2c_adapter adapter;
+ struct i2c_board_info *gpu_ccgx_ucsi;
+};
+
+static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
+{
+ u32 val;
+
+ /* enable I2C */
+ val = readl(i2cd->regs + I2C_MST_HYBRID_PADCTL);
+ val |= I2C_MST_HYBRID_PADCTL_MODE_I2C |
+ I2C_MST_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
+ I2C_MST_HYBRID_PADCTL_I2C_SDA_INPUT_RCV;
+ writel(val, i2cd->regs + I2C_MST_HYBRID_PADCTL);
+
+	/* enable 100 kHz mode */
+ val = I2C_MST_I2C0_TIMING_SCL_PERIOD_100KHZ;
+ val |= (I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT_MAX
+ << I2C_MST_I2C0_TIMING_TIMEOUT_CLK_CNT);
+ val |= I2C_MST_I2C0_TIMING_TIMEOUT_CHECK;
+ writel(val, i2cd->regs + I2C_MST_I2C0_TIMING);
+}
+
+static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
+{
+ unsigned long target = jiffies + msecs_to_jiffies(1000);
+ u32 val;
+
+ do {
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
+ break;
+ if ((val & I2C_MST_CNTL_STATUS) !=
+ I2C_MST_CNTL_STATUS_BUS_BUSY)
+ break;
+ usleep_range(500, 600);
+ } while (time_is_after_jiffies(target));
+
+ if (time_is_before_jiffies(target)) {
+ dev_err(i2cd->dev, "i2c timeout error %x\n", val);
+ return -ETIME;
+ }
+
+ val = readl(i2cd->regs + I2C_MST_CNTL);
+ switch (val & I2C_MST_CNTL_STATUS) {
+ case I2C_MST_CNTL_STATUS_OKAY:
+ return 0;
+ case I2C_MST_CNTL_STATUS_NO_ACK:
+ return -EIO;
+ case I2C_MST_CNTL_STATUS_TIMEOUT:
+ return -ETIME;
+ default:
+ return 0;
+ }
+}
+
+static int gpu_i2c_read(struct gpu_i2c_dev *i2cd, u8 *data, u16 len)
+{
+ int status;
+ u32 val;
+
+ val = I2C_MST_CNTL_GEN_START | I2C_MST_CNTL_CMD_READ |
+ (len << I2C_MST_CNTL_BURST_SIZE_SHIFT) |
+ I2C_MST_CNTL_CYCLE_TRIGGER | I2C_MST_CNTL_GEN_NACK;
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ status = gpu_i2c_check_status(i2cd);
+ if (status < 0)
+ return status;
+
+ val = readl(i2cd->regs + I2C_MST_DATA);
+ switch (len) {
+ case 1:
+ data[0] = val;
+ break;
+ case 2:
+ put_unaligned_be16(val, data);
+ break;
+ case 3:
+ put_unaligned_be16(val >> 8, data);
+ data[2] = val;
+ break;
+ case 4:
+ put_unaligned_be32(val, data);
+ break;
+ default:
+ break;
+ }
+ return status;
+}
+
+static int gpu_i2c_start(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_START, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_stop(struct gpu_i2c_dev *i2cd)
+{
+ writel(I2C_MST_CNTL_GEN_STOP, i2cd->regs + I2C_MST_CNTL);
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_write(struct gpu_i2c_dev *i2cd, u8 data)
+{
+ u32 val;
+
+ writel(data, i2cd->regs + I2C_MST_DATA);
+
+ val = I2C_MST_CNTL_CMD_WRITE | (1 << I2C_MST_CNTL_BURST_SIZE_SHIFT);
+ writel(val, i2cd->regs + I2C_MST_CNTL);
+
+ return gpu_i2c_check_status(i2cd);
+}
+
+static int gpu_i2c_master_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct gpu_i2c_dev *i2cd = i2c_get_adapdata(adap);
+ int status, status2;
+ int i, j;
+
+ /*
+	 * The controller supports a maximum 4-byte read due to the known
+	 * limitation of sending STOP after every read.
+ */
+ for (i = 0; i < num; i++) {
+ if (msgs[i].flags & I2C_M_RD) {
+ /* program client address before starting read */
+ writel(msgs[i].addr, i2cd->regs + I2C_MST_ADDR);
+ /* gpu_i2c_read has implicit start */
+ status = gpu_i2c_read(i2cd, msgs[i].buf, msgs[i].len);
+ if (status < 0)
+ goto stop;
+ } else {
+ u8 addr = i2c_8bit_addr_from_msg(msgs + i);
+
+ status = gpu_i2c_start(i2cd);
+ if (status < 0) {
+ if (i == 0)
+ return status;
+ goto stop;
+ }
+
+ status = gpu_i2c_write(i2cd, addr);
+ if (status < 0)
+ goto stop;
+
+ for (j = 0; j < msgs[i].len; j++) {
+ status = gpu_i2c_write(i2cd, msgs[i].buf[j]);
+ if (status < 0)
+ goto stop;
+ }
+ }
+ }
+ status = gpu_i2c_stop(i2cd);
+ if (status < 0)
+ return status;
+
+ return i;
+stop:
+ status2 = gpu_i2c_stop(i2cd);
+ if (status2 < 0)
+ dev_err(i2cd->dev, "i2c stop failed %d\n", status2);
+ return status;
+}
+
+static const struct i2c_adapter_quirks gpu_i2c_quirks = {
+ .max_read_len = 4,
+ .flags = I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static u32 gpu_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm gpu_i2c_algorithm = {
+ .master_xfer = gpu_i2c_master_xfer,
+ .functionality = gpu_i2c_functionality,
+};
+
+/*
+ * This driver is for Nvidia GPU cards with a USB Type-C interface.
+ * We match the cards on vendor ID and class code only, to avoid
+ * having to add a product ID for every new card that needs this
+ * driver.
+ * There is currently no class code defined for a UCSI device over
+ * PCI, so the UNKNOWN class is used for now; it will be updated once
+ * UCSI over PCI gets a class code.
+ * No other NVIDIA cards use the UNKNOWN class code. Even if the
+ * driver gets loaded for an undesired card, an i2c_read() (initiated
+ * from the UCSI i2c_client) or the UCSI commands will eventually
+ * time out.
+ */
+#define PCI_CLASS_SERIAL_UNKNOWN 0x0c80
+static const struct pci_device_id gpu_i2c_ids[] = {
+ { PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_SERIAL_UNKNOWN << 8, 0xffffff00},
+ { }
+};
+MODULE_DEVICE_TABLE(pci, gpu_i2c_ids);
+
+static int gpu_populate_client(struct gpu_i2c_dev *i2cd, int irq)
+{
+ struct i2c_client *ccgx_client;
+
+ i2cd->gpu_ccgx_ucsi = devm_kzalloc(i2cd->dev,
+ sizeof(*i2cd->gpu_ccgx_ucsi),
+ GFP_KERNEL);
+ if (!i2cd->gpu_ccgx_ucsi)
+ return -ENOMEM;
+
+ strlcpy(i2cd->gpu_ccgx_ucsi->type, "ccgx-ucsi",
+ sizeof(i2cd->gpu_ccgx_ucsi->type));
+ i2cd->gpu_ccgx_ucsi->addr = 0x8;
+ i2cd->gpu_ccgx_ucsi->irq = irq;
+ ccgx_client = i2c_new_device(&i2cd->adapter, i2cd->gpu_ccgx_ucsi);
+ if (!ccgx_client)
+ return -ENODEV;
+
+ return 0;
+}
+
+static int gpu_i2c_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct gpu_i2c_dev *i2cd;
+ int status;
+
+ i2cd = devm_kzalloc(&pdev->dev, sizeof(*i2cd), GFP_KERNEL);
+ if (!i2cd)
+ return -ENOMEM;
+
+ i2cd->dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, i2cd);
+
+ status = pcim_enable_device(pdev);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pcim_enable_device failed %d\n", status);
+ return status;
+ }
+
+ pci_set_master(pdev);
+
+ i2cd->regs = pcim_iomap(pdev, 0, 0);
+ if (!i2cd->regs) {
+ dev_err(&pdev->dev, "pcim_iomap failed\n");
+ return -ENOMEM;
+ }
+
+ status = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
+ if (status < 0) {
+ dev_err(&pdev->dev, "pci_alloc_irq_vectors err %d\n", status);
+ return status;
+ }
+
+ gpu_enable_i2c_bus(i2cd);
+
+ i2c_set_adapdata(&i2cd->adapter, i2cd);
+ i2cd->adapter.owner = THIS_MODULE;
+ strlcpy(i2cd->adapter.name, "NVIDIA GPU I2C adapter",
+ sizeof(i2cd->adapter.name));
+ i2cd->adapter.algo = &gpu_i2c_algorithm;
+ i2cd->adapter.quirks = &gpu_i2c_quirks;
+ i2cd->adapter.dev.parent = &pdev->dev;
+ status = i2c_add_adapter(&i2cd->adapter);
+ if (status < 0)
+ goto free_irq_vectors;
+
+ status = gpu_populate_client(i2cd, pdev->irq);
+ if (status < 0) {
+ dev_err(&pdev->dev, "gpu_populate_client failed %d\n", status);
+ goto del_adapter;
+ }
+
+ return 0;
+
+del_adapter:
+ i2c_del_adapter(&i2cd->adapter);
+free_irq_vectors:
+ pci_free_irq_vectors(pdev);
+ return status;
+}
+
+static void gpu_i2c_remove(struct pci_dev *pdev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(&pdev->dev);
+
+ i2c_del_adapter(&i2cd->adapter);
+ pci_free_irq_vectors(pdev);
+}
+
+static int gpu_i2c_resume(struct device *dev)
+{
+ struct gpu_i2c_dev *i2cd = dev_get_drvdata(dev);
+
+ gpu_enable_i2c_bus(i2cd);
+ return 0;
+}
+
+static UNIVERSAL_DEV_PM_OPS(gpu_i2c_driver_pm, NULL, gpu_i2c_resume, NULL);
+
+static struct pci_driver gpu_i2c_driver = {
+ .name = "nvidia-gpu",
+ .id_table = gpu_i2c_ids,
+ .probe = gpu_i2c_probe,
+ .remove = gpu_i2c_remove,
+ .driver = {
+ .pm = &gpu_i2c_driver_pm,
+ },
+};
+
+module_pci_driver(gpu_i2c_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia GPU I2C Controller Driver");
+MODULE_LICENSE("GPL v2");
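A hypothetical consumer-side sketch of the only multi-message shape the quirks above allow, a combined write-then-read capped at four bytes (the client address, register, and the 'adapter' pointer are examples, not part of the driver):

	u8 reg = 0x01, rx[2];
	struct i2c_msg msgs[] = {
		{ .addr = 0x08, .flags = 0,        .len = 1, .buf = &reg },
		{ .addr = 0x08, .flags = I2C_M_RD, .len = 2, .buf = rx },
	};

	/* returns the number of messages transferred (2) on success */
	int ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));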
dev_dbg(&pdev->dev, "i2c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
- ret = i2c_add_adapter(&gi2c->adap);
- if (ret) {
- dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
- return ret;
- }
-
gi2c->suspended = 1;
pm_runtime_set_suspended(gi2c->se.dev);
pm_runtime_set_autosuspend_delay(gi2c->se.dev, I2C_AUTO_SUSPEND_DELAY);
pm_runtime_use_autosuspend(gi2c->se.dev);
pm_runtime_enable(gi2c->se.dev);
+ ret = i2c_add_adapter(&gi2c->adap);
+ if (ret) {
+ dev_err(&pdev->dev, "Error adding i2c adapter %d\n", ret);
+ pm_runtime_disable(gi2c->se.dev);
+ return ret;
+ }
+
return 0;
}
{
struct geni_i2c_dev *gi2c = platform_get_drvdata(pdev);
- pm_runtime_disable(gi2c->se.dev);
i2c_del_adapter(&gi2c->adap);
+ pm_runtime_disable(gi2c->se.dev);
return 0;
}
{
struct pattern_trig_data *data = from_timer(data, t, timer);
- mutex_lock(&data->lock);
-
for (;;) {
if (!data->is_indefinite && !data->repeat)
break;
data->curr->brightness);
mod_timer(&data->timer,
jiffies + msecs_to_jiffies(data->curr->delta_t));
-
- /* Skip the tuple with zero duration */
- pattern_trig_update_patterns(data);
+ if (!data->next->delta_t) {
+ /* Skip the tuple with zero duration */
+ pattern_trig_update_patterns(data);
+ }
/* Select next tuple */
pattern_trig_update_patterns(data);
} else {
break;
}
-
- mutex_unlock(&data->lock);
}
static int pattern_trig_start_pattern(struct led_classdev *led_cdev)
if (res < -1 || res == 0)
return -EINVAL;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
struct pattern_trig_data *data = led_cdev->trigger_data;
int ccount, cr, offset = 0, err = 0;
- /*
- * Clear previous patterns' performence firstly, and remove the timer
- * without mutex lock to avoid dead lock.
- */
- del_timer_sync(&data->timer);
-
mutex_lock(&data->lock);
+ del_timer_sync(&data->timer);
+
if (data->is_hw_pattern)
led_cdev->pattern_clear(led_cdev);
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
select BCH
- select BCH_CONST_PARAMS
+ select BCH_CONST_PARAMS if !MTD_NAND_BCH
select BITREVERSE
help
This provides an MTD device driver for the M-Systems DiskOnChip
info->mtd = info->subdev[0].mtd;
ret = 0;
} else if (info->num_subdev > 1) {
- struct mtd_info *cdev[nr];
+ struct mtd_info **cdev;
+
+ cdev = kmalloc_array(nr, sizeof(*cdev), GFP_KERNEL);
+ if (!cdev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
/*
* We detected multiple devices. Concatenate them together.
*/
info->mtd = mtd_concat_create(cdev, info->num_subdev,
plat->name);
+ kfree(cdev);
if (info->mtd == NULL) {
ret = -ENXIO;
goto err;
/**
* panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
* @chip: NAND chip structure
* @timeo: timeout
*
err_unmap:
dma_unmap_single(nor->dev, dma_dst, len, DMA_FROM_DEVICE);
- return 0;
+ return ret;
}
static ssize_t cqspi_read(struct spi_nor *nor, loff_t from,
memcpy(&sfdp_params, params, sizeof(sfdp_params));
memcpy(&prev_map, &nor->erase_map, sizeof(prev_map));
- if (spi_nor_parse_sfdp(nor, &sfdp_params))
+ if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+ nor->addr_width = 0;
/* restore previous erase map */
memcpy(&nor->erase_map, &prev_map,
sizeof(nor->erase_map));
- else
+ } else {
memcpy(params, &sfdp_params, sizeof(*params));
+ }
}
return 0;
case NETDEV_CHANGE:
/* For 802.3ad mode only:
* Getting invalid Speed/Duplex values here will put slave
- * in weird state. So mark it as link-down for the time
+	 * in a weird state, so mark it as link-fail for the time
* being and let link-monitoring (miimon) set it right when
* correct speeds/duplex are available.
*/
if (bond_update_speed_duplex(slave) &&
BOND_MODE(bond) == BOND_MODE_8023AD)
- slave->link = BOND_LINK_DOWN;
+ slave->link = BOND_LINK_FAIL;
if (BOND_MODE(bond) == BOND_MODE_8023AD)
bond_3ad_adapter_speed_duplex_changed(slave);
{
int i;
- mutex_init(&dev->reg_mutex);
- mutex_init(&dev->stats_mutex);
- mutex_init(&dev->alu_mutex);
- mutex_init(&dev->vlan_mutex);
-
dev->ds->ops = &ksz_switch_ops;
for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) {
if (dev->pdata)
dev->chip_id = dev->pdata->chip_id;
+ mutex_init(&dev->reg_mutex);
+ mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->alu_mutex);
+ mutex_init(&dev->vlan_mutex);
+
if (ksz_switch_detect(dev))
return -EINVAL;
};
extern const struct ethtool_ops alx_ethtool_ops;
-extern const char alx_drv_name[];
#endif
#include "hw.h"
#include "reg.h"
-const char alx_drv_name[] = "alx";
+static const char alx_drv_name[] = "alx";
static void alx_free_txbuf(struct alx_tx_queue *txq, int entry)
{
intrl2_1_mask_clear(priv, 0xffffffff);
else
intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
-
- /* Last call before we start the real business */
- netif_tx_start_all_queues(dev);
}
static void rbuf_init(struct bcm_sysport_priv *priv)
bcm_sysport_netif_start(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
out_clear_rx_int:
struct bcm_sysport_priv *priv = netdev_priv(dev);
/* stop all software from updating hardware */
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);
napi_disable(&priv->napi);
cancel_work_sync(&priv->dim.dim.work);
phy_stop(dev->phydev);
if (!netif_running(dev))
return 0;
+ netif_device_detach(dev);
+
bcm_sysport_netif_stop(dev);
phy_suspend(dev->phydev);
- netif_device_detach(dev);
-
/* Disable UniMAC RX */
umac_enable_set(priv, CMD_RX_EN, 0);
goto out_free_rx_ring;
}
- netif_device_attach(dev);
-
/* RX pipe enable */
topctrl_writel(priv, 0, RX_FLUSH_CNTL);
bcm_sysport_netif_start(dev);
+ netif_device_attach(dev);
+
return 0;
out_free_rx_ring:
umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
- netif_tx_start_all_queues(dev);
bcmgenet_enable_tx_napi(priv);
/* Monitor link interrupts now */
bcmgenet_netif_start(dev);
+ netif_tx_start_all_queues(dev);
+
return 0;
err_irq1:
struct bcmgenet_priv *priv = netdev_priv(dev);
bcmgenet_disable_tx_napi(priv);
- netif_tx_stop_all_queues(dev);
+ netif_tx_disable(dev);
/* Disable MAC receive */
umac_enable_set(priv, CMD_RX_EN, false);
if (!netif_running(dev))
return 0;
+ netif_device_detach(dev);
+
bcmgenet_netif_stop(dev);
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
- netif_device_detach(dev);
-
/* Prepare the device for Wake-on-LAN and switch to the slow clock */
if (device_may_wakeup(d) && priv->wolopts) {
ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
/* Always enable ring 16 - descriptor ring */
bcmgenet_enable_dma(priv, dma_ctrl);
- netif_device_attach(dev);
-
if (!device_may_wakeup(d))
phy_resume(dev->phydev);
bcmgenet_netif_start(dev);
+ netif_device_attach(dev);
+
return 0;
out_clk_disable:
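The systemport and genet reorderings above all follow one rule: detach before quiescing, re-attach only after the datapath is running again, and use netif_tx_disable() (which takes the queue locks) rather than a bare stop when the hardware is about to go away. A skeleton of that ordering, with hypothetical my_netif_stop()/my_netif_start() helpers:

	static int my_suspend(struct net_device *dev)
	{
		netif_device_detach(dev);	/* stop new ndo_start_xmit calls */
		my_netif_stop(dev);		/* then quiesce NAPI/PHY/DMA */
		return 0;
	}

	static int my_resume(struct net_device *dev)
	{
		my_netif_start(dev);		/* bring the datapath up first */
		netif_device_attach(dev);	/* then let traffic flow again */
		return 0;
	}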
*/
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
- struct hclge_vport *vport = hdev->vport;
- u32 i, k, qs_bitmap;
- int ret;
+ int i;
for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
- qs_bitmap = 0;
+ u32 qs_bitmap = 0;
+ int k, ret;
for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hclge_vport *vport = &hdev->vport[k];
u16 qs_id = vport->qs_offset + tc;
u8 grp, sub_grp;
HCLGE_BP_SUB_GRP_ID_S);
if (i == grp)
qs_bitmap |= (1 << sub_grp);
-
- vport++;
}
ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
ring->packets++;
}
ring->bytes += tx_info->nr_bytes;
- netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
netif_tx_stop_queue(ring->tx_queue);
ring->queue_stopped++;
}
- send_doorbell = !skb->xmit_more || netif_xmit_stopped(ring->tx_queue);
+
+ send_doorbell = __netdev_tx_sent_queue(ring->tx_queue,
+ tx_info->nr_bytes,
+ skb->xmit_more);
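For clarity, the contract assumed of __netdev_tx_sent_queue() above: it does the BQL byte accounting and returns true when the doorbell must ring, i.e. at the end of an xmit_more burst or when the queue has just been stopped. Condensed (ring_doorbell() is a stand-in for the hardware kick):

	bool kick = __netdev_tx_sent_queue(ring->tx_queue,
					   tx_info->nr_bytes,
					   skb->xmit_more);
	if (kick)
		ring_doorbell(ring);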
real_size = (real_size / 16) & 0x3f;
burst_size = 7;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
- is_bytes = true;
rate = 4 * 1024;
burst_size = 4;
break;
struct qed_ptt *p_ptt, u32 *p_speed_mask)
{
u32 transceiver_type, transceiver_state;
+ int ret;
- qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
- &transceiver_type);
+ ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
+ &transceiver_type);
+ if (ret)
+ return ret;
if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
false)
.flags = PHY_HAS_INTERRUPT,
}, {
.phy_id = 0x001cc816,
- .name = "RTL8201F 10/100Mbps Ethernet",
+ .name = "RTL8201F Fast Ethernet",
.phy_id_mask = 0x001fffff,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
return ret;
}
+ cancel_delayed_work_sync(&pdata->carrier_check);
+
if (pdata->suspend_flags) {
netdev_warn(dev->net, "error during last resume\n");
pdata->suspend_flags = 0;
*/
if (ret && PMSG_IS_AUTO(message))
usbnet_resume(intf);
+
+ if (ret)
+ schedule_delayed_work(&pdata->carrier_check,
+ CARRIER_CHECK_DELAY);
+
return ret;
}
if (ns->ndev)
nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
- if (ns->head->disk)
+ if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ }
#endif
}
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value for 512 until disk is validated */
blk_queue_logical_block_size(q, 512);
+ blk_set_stacking_limits(&q->limits);
/* we need to propagate up the VWC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
struct pci_dev *p2p_dev;
int ret;
- if (!ctrl->p2p_client)
+ if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
if (ns->p2p_dev) {
int inline_page_count;
};
-static struct workqueue_struct *nvmet_rdma_delete_wq;
static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_workqueue(nvmet_rdma_delete_wq);
+ flush_scheduled_work();
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
/* Destroying rdma_cm id is not needed here */
return 0;
}
if (disconnect) {
rdma_disconnect(queue->cm_id);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
}
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
/**
if (ret)
goto err_ib_client;
- nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
- if (!nvmet_rdma_delete_wq) {
- ret = -ENOMEM;
- goto err_unreg_transport;
- }
-
return 0;
-err_unreg_transport:
- nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
ib_unregister_client(&nvmet_rdma_ib_client);
return ret;
static void __exit nvmet_rdma_exit(void)
{
- destroy_workqueue(nvmet_rdma_delete_wq);
nvmet_unregister_transport(&nvmet_rdma_ops);
ib_unregister_client(&nvmet_rdma_ib_client);
WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
* set by the driver.
*/
mask = DMA_BIT_MASK(ilog2(dma_addr + size - 1) + 1);
- dev->bus_dma_mask = mask;
dev->coherent_dma_mask &= mask;
*dev->dma_mask &= mask;
+ /* ...but only set bus mask if we found valid dma-ranges earlier */
+ if (!ret)
+ dev->bus_dma_mask = mask;
coherent = of_dma_is_coherent(np);
dev_dbg(dev, "device is%sdma coherent\n",
distance = of_read_number(matrix, 1);
matrix++;
+ if ((nodea == nodeb && distance != LOCAL_DISTANCE) ||
+ (nodea != nodeb && distance <= LOCAL_DISTANCE)) {
+ pr_err("Invalid distance[node%d -> node%d] = %d\n",
+ nodea, nodeb, distance);
+ return -EINVAL;
+ }
+
numa_set_distance(nodea, nodeb, distance);
- pr_debug("distance[node%d -> node%d] = %d\n",
- nodea, nodeb, distance);
/* Set default distance of node B->A same as A->B */
if (nodeb > nodea)
#define SENSE_RESETTING_EVENT_BYTE 1
#define SENSE_RESETTING_EVENT_FLAG 0x80
+static inline u32 qeth_get_device_id(struct ccw_device *cdev)
+{
+ struct ccw_dev_id dev_id;
+ u32 id;
+
+ ccw_device_get_id(cdev, &dev_id);
+ id = dev_id.devno;
+ id |= (u32) (dev_id.ssid << 16);
+
+ return id;
+}
+
/*
* Common IO related definitions
*/
#define CARD_RDEV_ID(card) dev_name(&card->read.ccwdev->dev)
#define CARD_WDEV_ID(card) dev_name(&card->write.ccwdev->dev)
#define CARD_DDEV_ID(card) dev_name(&card->data.ccwdev->dev)
-#define CHANNEL_ID(channel) dev_name(&channel->ccwdev->dev)
+#define CCW_DEVID(cdev) (qeth_get_device_id(cdev))
+#define CARD_DEVID(card) (CCW_DEVID(CARD_RDEV(card)))
/**
* card stuff
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline bool qeth_netdev_is_registered(struct net_device *dev)
+{
+ return dev->netdev_ops != NULL;
+}
+
static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
unsigned int elements)
{
int qeth_do_run_thread(struct qeth_card *, unsigned long);
void qeth_clear_thread_start_bit(struct qeth_card *, unsigned long);
void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
-int qeth_core_hardsetup_card(struct qeth_card *);
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
void qeth_close_dev(struct qeth_card *);
-int qeth_send_setassparms(struct qeth_card *, struct qeth_cmd_buffer *, __u16,
- long,
- int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *, unsigned long),
- void *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
enum qeth_ipa_funcs,
return "OSD_1000";
case QETH_LINK_TYPE_10GBIT_ETH:
return "OSD_10GIG";
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ return "OSD_25GIG";
case QETH_LINK_TYPE_LANE_ETH100:
return "OSD_FE_LANE";
case QETH_LINK_TYPE_LANE_TR:
if (!iob) {
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob "
- "available\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "issue_next_read on device %x failed: no iob available\n",
+ CARD_DEVID(card));
return -ENOMEM;
}
qeth_setup_ccw(channel->ccw, CCW_CMD_READ, QETH_BUFSIZE, iob->data);
rc = ccw_device_start(channel->ccwdev, channel->ccw,
(addr_t) iob, 0, 0);
if (rc) {
- QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
- "rc=%i\n", dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
+ rc, CARD_DEVID(card));
atomic_set(&channel->irq_pending, 0);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
const char *ipa_name;
int com = cmd->hdr.command;
ipa_name = qeth_get_ipa_cmd_name(com);
+
if (rc)
- QETH_DBF_MESSAGE(2, "IPA: %s(x%X) for %s/%s returned "
- "x%X \"%s\"\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card), rc,
- qeth_get_ipa_msg(rc));
+ QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
+ ipa_name, com, CARD_DEVID(card), rc,
+ qeth_get_ipa_msg(rc));
else
- QETH_DBF_MESSAGE(5, "IPA: %s(x%X) for %s/%s succeeded\n",
- ipa_name, com, dev_name(&card->gdev->dev),
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
+ ipa_name, com, CARD_DEVID(card));
}
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
if ((buffer[2] & 0xc0) == 0xc0) {
- QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#02x\n",
+ QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
buffer[4]);
QETH_CARD_TEXT(card, 2, "ckidxres");
QETH_CARD_TEXT(card, 2, " idxterm");
QETH_CARD_TEXT(card, 2, "CGENCHK");
dev_warn(&cdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x\n",
- dev_name(&cdev->dev), dstat, cstat);
+ QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
+ CCW_DEVID(cdev), dstat, cstat);
print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
16, 1, irb, 64, 1);
return 1;
switch (PTR_ERR(irb)) {
case -EIO:
- QETH_DBF_MESSAGE(2, "%s i/o-error on device\n",
- dev_name(&cdev->dev));
+ QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
+ CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT_(card, 2, " rc%d", -EIO);
break;
}
break;
default:
- QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n",
- dev_name(&cdev->dev), PTR_ERR(irb));
+ QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
+ PTR_ERR(irb), CCW_DEVID(cdev));
QETH_CARD_TEXT(card, 2, "ckirberr");
QETH_CARD_TEXT(card, 2, " rc???");
}
dev_warn(&channel->ccwdev->dev,
"The qeth device driver failed to recover "
"an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s sense data available. cstat "
- "0x%X dstat 0x%X\n",
- dev_name(&channel->ccwdev->dev), cstat, dstat);
+ QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
+ CCW_DEVID(channel->ccwdev), cstat,
+ dstat);
print_hex_dump(KERN_WARNING, "qeth: irb ",
DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
print_hex_dump(KERN_WARNING, "qeth: sense data ",
if (channel->state != CH_STATE_ACTIVATING) {
dev_warn(&channel->ccwdev->dev, "The qeth device driver"
" failed to recover an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX activate timed out on channel %x\n",
+ CCW_DEVID(channel->ccwdev));
QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME);
return -ETIME;
}
"The adapter is used exclusively by another "
"host\n");
else
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:"
- " negative reply\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
goto out;
}
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: "
- "function level mismatch (sent: 0x%x, received: "
- "0x%x)\n", dev_name(&channel->ccwdev->dev),
- card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, temp);
goto out;
}
channel->state = CH_STATE_UP;
"insufficient authorization\n");
break;
default:
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:"
- " negative reply\n",
- dev_name(&channel->ccwdev->dev));
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
+ CCW_DEVID(channel->ccwdev));
}
QETH_CARD_TEXT_(card, 2, "idxread%c",
QETH_IDX_ACT_CAUSE_CODE(iob->data));
memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
if (temp != qeth_peer_func_level(card->info.func_level)) {
- QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function "
- "level mismatch (sent: 0x%x, received: 0x%x)\n",
- dev_name(&channel->ccwdev->dev),
- card->info.func_level, temp);
+ QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
+ CCW_DEVID(channel->ccwdev),
+ card->info.func_level, temp);
goto out;
}
memcpy(&card->token.issuer_rm_r,
(addr_t) iob, 0, 0, event_timeout);
spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: "
- "ccw_device_start rc = %i\n",
- dev_name(&channel->ccwdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
+ CARD_DEVID(card), rc);
QETH_CARD_TEXT_(card, 2, " err%d", rc);
spin_lock_irq(&card->lock);
list_del_init(&reply->list);
} else {
dev_warn(&card->gdev->dev,
"The qeth driver ran out of channel command buffers\n");
- QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
- dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(1, "device %x ran out of channel command buffers",
+ CARD_DEVID(card));
}
return iob;
return 0;
default:
if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Unhandled "
- "rc=%d\n",
- dev_name(&card->gdev->dev),
- cmd->hdr.return_code);
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
+ CARD_DEVID(card),
+ cmd->hdr.return_code);
return 0;
}
}
card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
} else
- QETH_DBF_MESSAGE(1, "%s IPA_CMD_QIPASSIST: Flawed LIC detected"
- "\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
+ CARD_DEVID(card));
return 0;
}
cmd->data.setadapterparms.hdr.return_code);
if (cmd->data.setadapterparms.hdr.return_code !=
SET_ACCESS_CTRL_RC_SUCCESS)
- QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
- card->gdev->dev.kobj.name,
- access_ctrl_req->subcmd_code,
- cmd->data.setadapterparms.hdr.return_code);
+ QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
+ access_ctrl_req->subcmd_code, CARD_DEVID(card),
+ cmd->data.setadapterparms.hdr.return_code);
switch (cmd->data.setadapterparms.hdr.return_code) {
case SET_ACCESS_CTRL_RC_SUCCESS:
if (card->options.isolation == ISOLATION_MODE_NONE) {
}
break;
case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already "
- "deactivated\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
+ CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
- QETH_DBF_MESSAGE(2, "%s QDIO data connection isolation already"
- " activated\n", dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
+ CARD_DEVID(card));
if (fallback)
card->options.isolation = card->options.prev_isolation;
break;
rc = qeth_setadpparms_set_access_ctrl(card,
card->options.isolation, fallback);
if (rc) {
- QETH_DBF_MESSAGE(3,
- "IPA(SET_ACCESS_CTRL,%s,%d) sent failed\n",
- card->gdev->dev.kobj.name,
- rc);
+ QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
+ rc, CARD_DEVID(card));
rc = -EOPNOTSUPP;
}
} else if (card->options.isolation != ISOLATION_MODE_NONE) {
rc = BMCR_FULLDPLX;
if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
(card->info.link_type != QETH_LINK_TYPE_OSN) &&
- (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH))
+ (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
+ (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
rc |= BMCR_SPEED100;
break;
case MII_BMSR: /* Basic mode status register */
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
qeth_snmp_command_cb, (void *)&qinfo);
if (rc)
- QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n",
- QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
+ CARD_DEVID(card), rc);
else {
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
rc = qeth_read_conf_data(card, (void **) &prcd, &length);
if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
- dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
goto out_offline;
}
.remove = ccwgroup_remove_ccwdev,
};
-int qeth_core_hardsetup_card(struct qeth_card *card)
+int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
int retries = 3;
int rc;
qeth_update_from_chp_desc(card);
retry:
if (retries < 3)
- QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
- dev_name(&card->gdev->dev));
+ QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
+ CARD_DEVID(card));
rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
if (rc == IPA_RC_LAN_OFFLINE) {
dev_warn(&card->gdev->dev,
"The LAN is offline\n");
- netif_carrier_off(card->dev);
+ *carrier_ok = false;
} else {
rc = -ENODEV;
goto out;
}
} else {
- netif_carrier_on(card->dev);
+ *carrier_ok = true;
+ }
+
+ if (qeth_netdev_is_registered(card->dev)) {
+ if (*carrier_ok)
+ netif_carrier_on(card->dev);
+ else
+ netif_carrier_off(card->dev);
}
card->options.ipa4.supported_funcs = 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
"an error on the device\n");
- QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n",
- dev_name(&card->gdev->dev), rc);
+ QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
+ CARD_DEVID(card), rc);
return rc;
}
EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
}
EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
-int qeth_send_setassparms(struct qeth_card *card,
- struct qeth_cmd_buffer *iob, __u16 len, long data,
- int (*reply_cb)(struct qeth_card *,
- struct qeth_reply *, unsigned long),
- void *reply_param)
+static int qeth_send_setassparms(struct qeth_card *card,
+ struct qeth_cmd_buffer *iob, u16 len,
+ long data, int (*reply_cb)(struct qeth_card *,
+ struct qeth_reply *,
+ unsigned long),
+ void *reply_param)
{
int rc;
struct qeth_ipa_cmd *cmd;
rc = qeth_send_ipa_cmd(card, iob, reply_cb, reply_param);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_setassparms);
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
enum qeth_ipa_funcs ipa_func,
WARN_ON_ONCE(1);
}
- /* fallthrough from high to low, to select all legal speeds: */
+ /* partially falls through, to also select lower speeds */
switch (maxspeed) {
+ case SPEED_25000:
+ ethtool_link_ksettings_add_link_mode(cmd, supported,
+ 25000baseSR_Full);
+ ethtool_link_ksettings_add_link_mode(cmd, advertising,
+ 25000baseSR_Full);
+ break;
case SPEED_10000:
ethtool_link_ksettings_add_link_mode(cmd, supported,
10000baseT_Full);
cmd->base.speed = SPEED_10000;
cmd->base.port = PORT_FIBRE;
break;
+ case QETH_LINK_TYPE_25GBIT_ETH:
+ cmd->base.speed = SPEED_25000;
+ cmd->base.port = PORT_FIBRE;
+ break;
default:
cmd->base.speed = SPEED_10;
cmd->base.port = PORT_TP;
case CARD_INFO_PORTS_10G:
cmd->base.speed = SPEED_10000;
break;
+ case CARD_INFO_PORTS_25G:
+ cmd->base.speed = SPEED_25000;
+ break;
}
return 0;
QETH_LINK_TYPE_GBIT_ETH = 0x03,
QETH_LINK_TYPE_OSN = 0x04,
QETH_LINK_TYPE_10GBIT_ETH = 0x10,
+ QETH_LINK_TYPE_25GBIT_ETH = 0x12,
QETH_LINK_TYPE_LANE_ETH100 = 0x81,
QETH_LINK_TYPE_LANE_TR = 0x82,
QETH_LINK_TYPE_LANE_ETH1000 = 0x83,
CARD_INFO_PORTS_100M = 0x00000006,
CARD_INFO_PORTS_1G = 0x00000007,
CARD_INFO_PORTS_10G = 0x00000008,
+ CARD_INFO_PORTS_25G = 0x0000000A,
};
/* (SET)DELIP(M) IPA stuff ***************************************************/
__u32 flags_32bit;
struct qeth_ipa_caps caps;
struct qeth_checksum_cmd chksum;
- struct qeth_arp_cache_entry add_arp_entry;
+ struct qeth_arp_cache_entry arp_entry;
struct qeth_arp_query_data query_arp;
struct qeth_tso_start_data tso;
__u8 ip[16];
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EEXIST)
- QETH_DBF_MESSAGE(2, "MAC %pM already registered on %s\n",
- mac, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
+ CARD_DEVID(card));
else if (rc)
- QETH_DBF_MESSAGE(2, "Failed to register MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
+ CARD_DEVID(card), rc);
return rc;
}
QETH_CARD_TEXT(card, 2, "L2Rmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
- QETH_DBF_MESSAGE(2, "Failed to delete MAC %pM on %s: %d\n",
- mac, QETH_CARD_IFNAME(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n",
+ CARD_DEVID(card), rc);
return rc;
}
QETH_CARD_TEXT(card, 2, "L2sdvcb");
if (cmd->hdr.return_code) {
- QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x.\n",
+ QETH_DBF_MESSAGE(2, "Error in processing VLAN %u on device %x: %#x.\n",
cmd->data.setdelvlan.vlan_id,
- QETH_CARD_IFNAME(card), cmd->hdr.return_code);
+ CARD_DEVID(card), cmd->hdr.return_code);
QETH_CARD_TEXT_(card, 2, "L2VL%4x", cmd->hdr.command);
QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
}
rc = qeth_vm_request_mac(card);
if (!rc)
goto out;
- QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %s: x%x\n",
- CARD_BUS_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "z/VM MAC Service failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "err%04x", rc);
/* fall back to alternative mechanism: */
}
rc = qeth_setadpparms_change_macaddr(card);
if (!rc)
goto out;
- QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %s: x%x\n",
- CARD_BUS_ID(card), rc);
+ QETH_DBF_MESSAGE(2, "READ_MAC Assist failed on device %x: %#x\n",
+ CARD_DEVID(card), rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
/* fall back once more: */
}
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
- unregister_netdev(card->dev);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
}
static const struct ethtool_ops qeth_l2_ethtool_ops = {
.ndo_set_features = qeth_set_features
};
-static int qeth_l2_setup_netdev(struct qeth_card *card)
+static int qeth_l2_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
int rc;
- if (card->dev->netdev_ops)
+ if (qeth_netdev_is_registered(card->dev))
return 0;
card->dev->priv_flags |= IFF_UNICAST_FLT;
qeth_l2_request_initial_mac(card);
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
+ if (!rc && carrier_ok)
+ netif_carrier_on(card->dev);
+
if (rc)
card->dev->netdev_ops = NULL;
return rc;
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
+ bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
- rc = qeth_core_hardsetup_card(card);
+ rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
dev_info(&card->gdev->dev,
"The device represents a Bridge Capable Port\n");
- rc = qeth_l2_setup_netdev(card);
+ rc = qeth_l2_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;
QETH_CARD_TEXT(card, 4, "clearip");
- if (recover && card->options.sniffer)
- return;
-
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
QETH_PROT_IPV4);
if (rc) {
card->options.route4.type = NO_ROUTER;
- QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
- " on %s. Type set to 'no router'.\n", rc,
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+ rc, CARD_DEVID(card));
}
return rc;
}
QETH_PROT_IPV6);
if (rc) {
card->options.route6.type = NO_ROUTER;
- QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type"
- " on %s. Type set to 'no router'.\n", rc,
- QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Error (%#06x) while setting routing type on device %x. Type set to 'no router'.\n",
+ rc, CARD_DEVID(card));
}
return rc;
}
int rc = 0;
int cnt = 3;
+ if (card->options.sniffer)
+ return 0;
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "setaddr4");
{
int rc = 0;
+ if (card->options.sniffer)
+ return 0;
+
if (addr->proto == QETH_PROT_IPV4) {
QETH_CARD_TEXT(card, 2, "deladdr4");
QETH_CARD_HEX(card, 3, &addr->u.a4.addr, sizeof(int));
}
break;
default:
- QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
- cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+ QETH_DBF_MESSAGE(2, "Unknown sniffer action (%#06x) on device %x\n",
+ cmd->data.diagass.action, CARD_DEVID(card));
}
return 0;
qeth_l3_handle_promisc_mode(card);
}
-static const char *qeth_l3_arp_get_error_cause(int *rc)
+static int qeth_l3_arp_makerc(int rc)
{
- switch (*rc) {
- case QETH_IPA_ARP_RC_FAILED:
- *rc = -EIO;
- return "operation failed";
+ switch (rc) {
+ case IPA_RC_SUCCESS:
+ return 0;
case QETH_IPA_ARP_RC_NOTSUPP:
- *rc = -EOPNOTSUPP;
- return "operation not supported";
- case QETH_IPA_ARP_RC_OUT_OF_RANGE:
- *rc = -EINVAL;
- return "argument out of range";
case QETH_IPA_ARP_RC_Q_NOTSUPP:
- *rc = -EOPNOTSUPP;
- return "query operation not supported";
+ return -EOPNOTSUPP;
+ case QETH_IPA_ARP_RC_OUT_OF_RANGE:
+ return -EINVAL;
case QETH_IPA_ARP_RC_Q_NO_DATA:
- *rc = -ENOENT;
- return "no query data available";
+ return -ENOENT;
default:
- return "unknown error";
+ return -EIO;
}
}
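The rewrite above funnels all hardware return codes through one translation point instead of formatting an error string in every caller. A compact sketch of that shape, using made-up return-code values (the real QETH_IPA_ARP_RC_* constants live in the qeth headers):

#include <errno.h>
#include <stdio.h>

/* illustrative hardware return codes, not the real QETH values */
enum { RC_SUCCESS, RC_NOTSUPP, RC_OUT_OF_RANGE, RC_NO_DATA, RC_FAILED };

/* single rc -> errno mapping, in the style of qeth_l3_arp_makerc() */
static int makerc(int rc)
{
	switch (rc) {
	case RC_SUCCESS:	return 0;
	case RC_NOTSUPP:	return -EOPNOTSUPP;
	case RC_OUT_OF_RANGE:	return -EINVAL;
	case RC_NO_DATA:	return -ENOENT;
	default:		return -EIO;
	}
}

int main(void)
{
	/* on Linux, -EOPNOTSUPP is -95: prints "0 -95" */
	printf("%d %d\n", makerc(RC_SUCCESS), makerc(RC_NOTSUPP));
	return 0;
}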
static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries)
{
- int tmp;
int rc;
QETH_CARD_TEXT(card, 3, "arpstnoe");
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_SET_NO_ENTRIES,
no_entries);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on "
- "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static __u32 get_arp_entry_size(struct qeth_card *card,
{
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- int tmp;
int rc;
QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
rc = qeth_l3_send_ipa_arp_cmd(card, iob,
QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
qeth_l3_arp_query_cb, (void *)qinfo);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2,
- "Error while querying ARP cache on %s: %s "
- "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
-
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Error while querying ARP cache on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
return rc;
}
-static int qeth_l3_arp_add_entry(struct qeth_card *card,
- struct qeth_arp_cache_entry *entry)
+static int qeth_l3_arp_modify_entry(struct qeth_card *card,
+ struct qeth_arp_cache_entry *entry,
+ enum qeth_arp_process_subcmds arp_cmd)
{
+ struct qeth_arp_cache_entry *cmd_entry;
struct qeth_cmd_buffer *iob;
- char buf[16];
- int tmp;
int rc;
- QETH_CARD_TEXT(card, 3, "arpadent");
+ if (arp_cmd == IPA_CMD_ASS_ARP_ADD_ENTRY)
+ QETH_CARD_TEXT(card, 3, "arpadd");
+ else
+ QETH_CARD_TEXT(card, 3, "arpdel");
/*
* currently GuestLAN only supports the ARP assist function
return -EOPNOTSUPP;
}
- iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_ADD_ENTRY,
- sizeof(struct qeth_arp_cache_entry),
- QETH_PROT_IPV4);
+ iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING, arp_cmd,
+ sizeof(*cmd_entry), QETH_PROT_IPV4);
if (!iob)
return -ENOMEM;
- rc = qeth_send_setassparms(card, iob,
- sizeof(struct qeth_arp_cache_entry),
- (unsigned long) entry,
- qeth_setassparms_cb, NULL);
- if (rc) {
- tmp = rc;
- qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
- QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s "
- "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
-}
-
-static int qeth_l3_arp_remove_entry(struct qeth_card *card,
- struct qeth_arp_cache_entry *entry)
-{
- struct qeth_cmd_buffer *iob;
- char buf[16] = {0, };
- int tmp;
- int rc;
- QETH_CARD_TEXT(card, 3, "arprment");
+ cmd_entry = &__ipa_cmd(iob)->data.setassparms.data.arp_entry;
+ ether_addr_copy(cmd_entry->macaddr, entry->macaddr);
+ memcpy(cmd_entry->ipaddr, entry->ipaddr, 4);
+ rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not modify (cmd: %#x) ARP entry on device %x: %#x\n",
+ arp_cmd, CARD_DEVID(card), rc);
- /*
- * currently GuestLAN only supports the ARP assist function
- * IPA_CMD_ASS_ARP_QUERY_INFO, but not IPA_CMD_ASS_ARP_REMOVE_ENTRY;
- * thus we say EOPNOTSUPP for this ARP function
- */
- if (card->info.guestlan)
- return -EOPNOTSUPP;
- if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) {
- return -EOPNOTSUPP;
- }
- memcpy(buf, entry, 12);
- iob = qeth_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
- IPA_CMD_ASS_ARP_REMOVE_ENTRY,
- 12,
- QETH_PROT_IPV4);
- if (!iob)
- return -ENOMEM;
- rc = qeth_send_setassparms(card, iob,
- 12, (unsigned long)buf,
- qeth_setassparms_cb, NULL);
- if (rc) {
- tmp = rc;
- memset(buf, 0, 16);
- qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf);
- QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s"
- " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_arp_flush_cache(struct qeth_card *card)
{
int rc;
- int tmp;
QETH_CARD_TEXT(card, 3, "arpflush");
}
rc = qeth_send_simple_setassparms(card, IPA_ARP_PROCESSING,
IPA_CMD_ASS_ARP_FLUSH_CACHE, 0);
- if (rc) {
- tmp = rc;
- QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s "
- "(0x%x/%d)\n", QETH_CARD_IFNAME(card),
- qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
- }
- return rc;
+ if (rc)
+ QETH_DBF_MESSAGE(2, "Could not flush ARP cache on device %x: %#x\n",
+ CARD_DEVID(card), rc);
+ return qeth_l3_arp_makerc(rc);
}
static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
+ enum qeth_arp_process_subcmds arp_cmd;
int rc = 0;
switch (cmd) {
rc = qeth_l3_arp_query(card, rq->ifr_ifru.ifru_data);
break;
case SIOC_QETH_ARP_ADD_ENTRY:
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
- if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
- sizeof(struct qeth_arp_cache_entry)))
- rc = -EFAULT;
- else
- rc = qeth_l3_arp_add_entry(card, &arp_entry);
- break;
case SIOC_QETH_ARP_REMOVE_ENTRY:
- if (!capable(CAP_NET_ADMIN)) {
- rc = -EPERM;
- break;
- }
- if (copy_from_user(&arp_entry, rq->ifr_ifru.ifru_data,
- sizeof(struct qeth_arp_cache_entry)))
- rc = -EFAULT;
- else
- rc = qeth_l3_arp_remove_entry(card, &arp_entry);
- break;
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ if (copy_from_user(&arp_entry, rq->ifr_data, sizeof(arp_entry)))
+ return -EFAULT;
+
+ arp_cmd = (cmd == SIOC_QETH_ARP_ADD_ENTRY) ?
+ IPA_CMD_ASS_ARP_ADD_ENTRY :
+ IPA_CMD_ASS_ARP_REMOVE_ENTRY;
+ return qeth_l3_arp_modify_entry(card, &arp_entry, arp_cmd);
case SIOC_QETH_ARP_FLUSH_CACHE:
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
.ndo_neigh_setup = qeth_l3_neigh_setup,
};
-static int qeth_l3_setup_netdev(struct qeth_card *card)
+static int qeth_l3_setup_netdev(struct qeth_card *card, bool carrier_ok)
{
unsigned int headroom;
int rc;
- if (card->dev->netdev_ops)
+ if (qeth_netdev_is_registered(card->dev))
return 0;
if (card->info.type == QETH_CARD_TYPE_OSD ||
netif_napi_add(card->dev, &card->napi, qeth_poll, QETH_NAPI_WEIGHT);
rc = register_netdev(card->dev);
+ if (!rc && carrier_ok)
+ netif_carrier_on(card->dev);
+
out:
if (rc)
card->dev->netdev_ops = NULL;
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
- unregister_netdev(card->dev);
+ if (qeth_netdev_is_registered(card->dev))
+ unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
qeth_l3_clear_ipato_list(card);
}
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
int rc = 0;
enum qeth_card_states recover_flag;
+ bool carrier_ok;
mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
recover_flag = card->state;
- rc = qeth_core_hardsetup_card(card);
+ rc = qeth_core_hardsetup_card(card, &carrier_ok);
if (rc) {
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
- rc = qeth_l3_setup_netdev(card);
+ rc = qeth_l3_setup_netdev(card, carrier_ok);
if (rc)
goto out_remove;
hrtimer_init(&s->rx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
s->rx_timer.function = rx_timer_fn;
+ s->chan_rx_saved = s->chan_rx = chan;
+
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
-
- s->chan_rx_saved = s->chan_rx = chan;
}
}
static int sci_remove(struct platform_device *dev)
{
struct sci_port *port = platform_get_drvdata(dev);
+ unsigned int type = port->port.type; /* uart_remove_... clears it */
sci_ports_in_use &= ~BIT(port->port.line);
uart_remove_one_port(&sci_uart_driver, &port->port);
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_trigger.attr);
}
- if (port->port.type == PORT_SCIFA || port->port.type == PORT_SCIFB ||
- port->port.type == PORT_HSCIF) {
+ if (type == PORT_SCIFA || type == PORT_SCIFB || type == PORT_HSCIF) {
sysfs_remove_file(&dev->dev.kobj,
&dev_attr_rx_fifo_timeout.attr);
}
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}
EXPORT_SYMBOL(tty_termios_baud_rate);
else
cbaud += 15;
}
- return baud_table[cbaud];
+ return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
#else /* IBSHIFT */
return tty_termios_baud_rate(termios);
#endif /* IBSHIFT */
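Both hunks above add the same guard: refuse to index baud_table[] with an out-of-range cbaud and report B0 instead. A minimal sketch of the pattern (the table here is an illustrative subset, not the kernel's full baud_table):

#include <stdio.h>

static const int baud_table[] = { 0, 50, 75, 110, 134, 150, 200, 300 };
static const int n_baud_table = sizeof(baud_table) / sizeof(baud_table[0]);

/* return 0 (B0, "hang up") for any index outside the table rather than
 * reading past its end - the guard the fix above adds */
static int baud_rate(int cbaud)
{
	return cbaud >= n_baud_table ? 0 : baud_table[cbaud];
}

int main(void)
{
	printf("%d %d\n", baud_rate(3), baud_rate(42)); /* prints "110 0" */
	return 0;
}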
scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
vc->vc_need_wrap = 0;
if (con_should_update(vc))
- do_update_region(vc, (unsigned long) start, count);
+ do_update_region(vc, (unsigned long)(start + offset), count);
}
static void csi_X(struct vc_data *vc, int vpar) /* erase the following vpar positions */
if TYPEC_UCSI
+config UCSI_CCG
+ tristate "UCSI Interface Driver for Cypress CCGx"
+ depends on I2C
+ help
+ This driver enables UCSI support on platforms that expose a
+ Cypress CCGx Type-C controller over I2C interface.
+
+ To compile the driver as a module, choose M here: the module will be
+ called ucsi_ccg.
+
config UCSI_ACPI
tristate "UCSI ACPI Interface Driver"
depends on ACPI
typec_ucsi-$(CONFIG_TRACING) += trace.o
obj-$(CONFIG_UCSI_ACPI) += ucsi_acpi.o
+
+obj-$(CONFIG_UCSI_CCG) += ucsi_ccg.o
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * UCSI driver for Cypress CCGx Type-C controller
+ *
+ * Copyright (C) 2017-2018 NVIDIA Corporation. All rights reserved.
+ * Author: Ajay Gupta <ajayg@nvidia.com>
+ *
+ * Some code borrowed from drivers/usb/typec/ucsi/ucsi_acpi.c
+ */
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/unaligned.h>
+#include "ucsi.h"
+
+struct ucsi_ccg {
+ struct device *dev;
+ struct ucsi *ucsi;
+ struct ucsi_ppm ppm;
+ struct i2c_client *client;
+};
+
+#define CCGX_RAB_INTR_REG 0x06
+#define CCGX_RAB_UCSI_CONTROL 0x39
+#define CCGX_RAB_UCSI_CONTROL_START BIT(0)
+#define CCGX_RAB_UCSI_CONTROL_STOP BIT(1)
+#define CCGX_RAB_UCSI_DATA_BLOCK(offset) (0xf000 | ((offset) & 0xff))
+
+static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ const struct i2c_adapter_quirks *quirks = client->adapter->quirks;
+ unsigned char buf[2];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ .len = sizeof(buf),
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .buf = data,
+ },
+ };
+ u32 rlen, rem_len = len, max_read_len = len;
+ int status;
+
+ /* check for any max_read_len limitation on the i2c adapter */
+ if (quirks && quirks->max_read_len)
+ max_read_len = quirks->max_read_len;
+
+ while (rem_len > 0) {
+ msgs[1].buf = &data[len - rem_len];
+ rlen = min_t(u16, rem_len, max_read_len);
+ msgs[1].len = rlen;
+ put_unaligned_le16(rab, buf);
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ return status;
+ }
+ rab += rlen;
+ rem_len -= rlen;
+ }
+
+ return 0;
+}
+
+static int ccg_write(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
+{
+ struct i2c_client *client = uc->client;
+ unsigned char *buf;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .flags = 0x0,
+ }
+ };
+ int status;
+
+ buf = kzalloc(len + sizeof(rab), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ put_unaligned_le16(rab, buf);
+ memcpy(buf + sizeof(rab), data, len);
+
+ msgs[0].len = len + sizeof(rab);
+ msgs[0].buf = buf;
+
+ status = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (status < 0) {
+ dev_err(uc->dev, "i2c_transfer failed %d\n", status);
+ kfree(buf);
+ return status;
+ }
+
+ kfree(buf);
+ return 0;
+}
+
+static int ucsi_ccg_init(struct ucsi_ccg *uc)
+{
+ unsigned int count = 10;
+ u8 data;
+ int status;
+
+ data = CCGX_RAB_UCSI_CONTROL_STOP;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ data = CCGX_RAB_UCSI_CONTROL_START;
+ status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ /*
+ * Flush the CCGx RESPONSE queue by acking interrupts. The UCSI control
+ * register write above pushes a response which must be cleared.
+ */
+ do {
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ if (!data)
+ return 0;
+
+ status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ usleep_range(10000, 11000);
+ } while (--count);
+
+ return -ETIMEDOUT;
+}
+
+static int ucsi_ccg_send_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_out));
+ status = ccg_write(uc, rab, ppm +
+ offsetof(struct ucsi_data, message_out),
+ sizeof(uc->ppm.data->message_out));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, ctrl));
+ return ccg_write(uc, rab, ppm + offsetof(struct ucsi_data, ctrl),
+ sizeof(uc->ppm.data->ctrl));
+}
+
+static int ucsi_ccg_recv_data(struct ucsi_ccg *uc)
+{
+ u8 *ppm = (u8 *)uc->ppm.data;
+ int status;
+ u16 rab;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, cci));
+ status = ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, cci),
+ sizeof(uc->ppm.data->cci));
+ if (status < 0)
+ return status;
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, message_in));
+ return ccg_read(uc, rab, ppm + offsetof(struct ucsi_data, message_in),
+ sizeof(uc->ppm.data->message_in));
+}
+
+static int ucsi_ccg_ack_interrupt(struct ucsi_ccg *uc)
+{
+ int status;
+ unsigned char data;
+
+ status = ccg_read(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+ if (status < 0)
+ return status;
+
+ return ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
+}
+
+static int ucsi_ccg_sync(struct ucsi_ppm *ppm)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+ int status;
+
+ status = ucsi_ccg_recv_data(uc);
+ if (status < 0)
+ return status;
+
+ /* ack interrupt to allow next command to run */
+ return ucsi_ccg_ack_interrupt(uc);
+}
+
+static int ucsi_ccg_cmd(struct ucsi_ppm *ppm, struct ucsi_control *ctrl)
+{
+ struct ucsi_ccg *uc = container_of(ppm, struct ucsi_ccg, ppm);
+
+ ppm->data->ctrl.raw_cmd = ctrl->raw_cmd;
+ return ucsi_ccg_send_data(uc);
+}
+
+static irqreturn_t ccg_irq_handler(int irq, void *data)
+{
+ struct ucsi_ccg *uc = data;
+
+ ucsi_notify(uc->ucsi);
+
+ return IRQ_HANDLED;
+}
+
+static int ucsi_ccg_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ucsi_ccg *uc;
+ int status;
+ u16 rab;
+
+ uc = devm_kzalloc(dev, sizeof(*uc), GFP_KERNEL);
+ if (!uc)
+ return -ENOMEM;
+
+ uc->ppm.data = devm_kzalloc(dev, sizeof(struct ucsi_data), GFP_KERNEL);
+ if (!uc->ppm.data)
+ return -ENOMEM;
+
+ uc->ppm.cmd = ucsi_ccg_cmd;
+ uc->ppm.sync = ucsi_ccg_sync;
+ uc->dev = dev;
+ uc->client = client;
+
+ /* reset ccg device and initialize ucsi */
+ status = ucsi_ccg_init(uc);
+ if (status < 0) {
+ dev_err(uc->dev, "ucsi_ccg_init failed - %d\n", status);
+ return status;
+ }
+
+ status = devm_request_threaded_irq(dev, client->irq, NULL,
+ ccg_irq_handler,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), uc);
+ if (status < 0) {
+ dev_err(uc->dev, "request_threaded_irq failed - %d\n", status);
+ return status;
+ }
+
+ uc->ucsi = ucsi_register_ppm(dev, &uc->ppm);
+ if (IS_ERR(uc->ucsi)) {
+ dev_err(uc->dev, "ucsi_register_ppm failed\n");
+ return PTR_ERR(uc->ucsi);
+ }
+
+ rab = CCGX_RAB_UCSI_DATA_BLOCK(offsetof(struct ucsi_data, version));
+ status = ccg_read(uc, rab, (u8 *)(uc->ppm.data) +
+ offsetof(struct ucsi_data, version),
+ sizeof(uc->ppm.data->version));
+ if (status < 0) {
+ ucsi_unregister_ppm(uc->ucsi);
+ return status;
+ }
+
+ i2c_set_clientdata(client, uc);
+ return 0;
+}
+
+static int ucsi_ccg_remove(struct i2c_client *client)
+{
+ struct ucsi_ccg *uc = i2c_get_clientdata(client);
+
+ ucsi_unregister_ppm(uc->ucsi);
+
+ return 0;
+}
+
+static const struct i2c_device_id ucsi_ccg_device_id[] = {
+ {"ccgx-ucsi", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ucsi_ccg_device_id);
+
+static struct i2c_driver ucsi_ccg_driver = {
+ .driver = {
+ .name = "ucsi_ccg",
+ },
+ .probe = ucsi_ccg_probe,
+ .remove = ucsi_ccg_remove,
+ .id_table = ucsi_ccg_device_id,
+};
+
+module_i2c_driver(ucsi_ccg_driver);
+
+MODULE_AUTHOR("Ajay Gupta <ajayg@nvidia.com>");
+MODULE_DESCRIPTION("UCSI driver for Cypress CCGx Type-C controller");
+MODULE_LICENSE("GPL v2");
ret = xenmem_reservation_increase(args->nr_pages, args->frames);
if (ret != args->nr_pages) {
- pr_debug("Failed to decrease reservation for DMA buffer\n");
+ pr_debug("Failed to increase reservation for DMA buffer\n");
ret = -EFAULT;
} else {
ret = 0;
MODULE_LICENSE("GPL");
-static unsigned int limit = 64;
-module_param(limit, uint, 0644);
-MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
- "the privcmd-buf device per open file");
-
struct privcmd_buf_private {
struct mutex lock;
struct list_head list;
- unsigned int allocated;
};
struct privcmd_buf_vma_private {
{
unsigned int i;
- vma_priv->file_priv->allocated -= vma_priv->n_pages;
-
list_del(&vma_priv->list);
for (i = 0; i < vma_priv->n_pages; i++)
- if (vma_priv->pages[i])
- __free_page(vma_priv->pages[i]);
+ __free_page(vma_priv->pages[i]);
kfree(vma_priv);
}
unsigned int i;
int ret = 0;
- if (!(vma->vm_flags & VM_SHARED) || count > limit ||
- file_priv->allocated + count > limit)
+ if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
if (!vma_priv)
return -ENOMEM;
- vma_priv->n_pages = count;
- count = 0;
- for (i = 0; i < vma_priv->n_pages; i++) {
+ for (i = 0; i < count; i++) {
vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!vma_priv->pages[i])
break;
- count++;
+ vma_priv->n_pages++;
}
mutex_lock(&file_priv->lock);
- file_priv->allocated += count;
-
vma_priv->file_priv = file_priv;
vma_priv->users = 1;
int btrfs_drop_inode(struct inode *inode);
int __init btrfs_init_cachep(void);
void __cold btrfs_destroy_cachep(void);
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new,
+ struct btrfs_path *path);
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
struct btrfs_root *root, int *was_new);
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
struct btrfs_root *root = arg;
struct btrfs_fs_info *fs_info = root->fs_info;
int again;
- struct btrfs_trans_handle *trans;
- do {
+ while (1) {
again = 0;
/* Make the cleaner go to sleep early. */
*/
btrfs_delete_unused_bgs(fs_info);
sleep:
+ if (kthread_should_park())
+ kthread_parkme();
+ if (kthread_should_stop())
+ return 0;
if (!again) {
set_current_state(TASK_INTERRUPTIBLE);
- if (!kthread_should_stop())
- schedule();
+ schedule();
__set_current_state(TASK_RUNNING);
}
- } while (!kthread_should_stop());
-
- /*
- * Transaction kthread is stopped before us and wakes us up.
- * However we might have started a new transaction and COWed some
- * tree blocks when deleting unused block groups for example. So
- * make sure we commit the transaction we started to have a clean
- * shutdown when evicting the btree inode - if it has dirty pages
- * when we do the final iput() on it, eviction will trigger a
- * writeback for it which will fail with null pointer dereferences
- * since work queues and other resources were already released and
- * destroyed by the time the iput/eviction/writeback is made.
- */
- trans = btrfs_attach_transaction(root);
- if (IS_ERR(trans)) {
- if (PTR_ERR(trans) != -ENOENT)
- btrfs_err(fs_info,
- "cleaner transaction attach returned %ld",
- PTR_ERR(trans));
- } else {
- int ret;
-
- ret = btrfs_commit_transaction(trans);
- if (ret)
- btrfs_err(fs_info,
- "cleaner open transaction commit returned %d",
- ret);
}
-
- return 0;
}
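For readers unfamiliar with kthread parking: the point of the rework above is that a parked thread stops picking up new work while its task_struct stays alive, so a late wake-up from the transaction kthread cannot touch freed memory; only after the dependent teardown is done does kthread_stop() reap it. A loose userspace pthread analog of that park-then-stop ordering (the flags and helpers are illustrative, not the kthread API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool parked, stopped;

static void *cleaner(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stopped) {
		if (parked) {	/* parked: stay alive, do no new work */
			pthread_cond_wait(&cond, &lock);
			continue;
		}
		pthread_mutex_unlock(&lock);
		/* ... one round of cleaning work (a real worker would
		 * sleep here waiting for work) ... */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, cleaner, NULL);

	/* shutdown step 1: park - thread stays valid but goes idle */
	pthread_mutex_lock(&lock);
	parked = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);

	/* ... tear down state the cleaner must no longer touch ... */

	/* shutdown step 2: stop and reap, the kthread_stop() moment */
	pthread_mutex_lock(&lock);
	stopped = true;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);
	pthread_join(t, NULL);

	puts("cleaner parked, then stopped");
	return 0;
}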
static int transaction_kthread(void *arg)
int ret;
set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags);
+ /*
+ * We don't want the cleaner to start new transactions, add more delayed
+ * iputs, etc. while we're closing. We can't use kthread_stop() yet
+ * because that frees the task_struct, and the transaction kthread might
+ * still try to wake up the cleaner.
+ */
+ kthread_park(fs_info->cleaner_kthread);
/* wait for the qgroup rescan worker to stop */
btrfs_qgroup_wait_for_completion(fs_info, false);
if (!sb_rdonly(fs_info->sb)) {
/*
- * If the cleaner thread is stopped and there are
- * block groups queued for removal, the deletion will be
- * skipped when we quit the cleaner thread.
+ * The cleaner kthread is stopped, so do one final pass over
+ * unused block groups.
*/
btrfs_delete_unused_bgs(fs_info);
unpin = pinned_extents;
again:
while (1) {
+ /*
+ * The btrfs_finish_extent_commit() may get the same range as
+ * ours between find_first_extent_bit and clear_extent_dirty.
+ * Hence, hold the unused_bg_unpin_mutex to avoid double-unpinning
+ * the same extent range.
+ */
+ mutex_lock(&fs_info->unused_bg_unpin_mutex);
ret = find_first_extent_bit(unpin, 0, &start, &end,
EXTENT_DIRTY, NULL);
- if (ret)
+ if (ret) {
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
break;
+ }
clear_extent_dirty(unpin, start, end);
btrfs_error_unpin_extent_range(fs_info, start, end);
+ mutex_unlock(&fs_info->unused_bg_unpin_mutex);
cond_resched();
}
* sure NOFS is set to keep us from deadlocking.
*/
nofs_flag = memalloc_nofs_save();
- inode = btrfs_iget(fs_info->sb, &location, root, NULL);
+ inode = btrfs_iget_path(fs_info->sb, &location, root, NULL, path);
+ btrfs_release_path(path);
memalloc_nofs_restore(nofs_flag);
if (IS_ERR(inode))
return inode;
path->search_commit_root = 1;
path->skip_locking = 1;
+ /*
+ * We must pass a path with search_commit_root set to btrfs_iget in
+ * order to avoid a deadlock when allocating extents for the tree root.
+ *
+ * When we are COWing an extent buffer from the tree root, while looking
+ * for a free extent at extent-tree.c:find_free_extent(), we can find a
+ * block group without its free space cache loaded. When we find one,
+ * we must load its space cache, which requires reading its free space
+ * cache's inode item from the root tree. If this inode item is located
+ * in the same leaf that we started COWing before, then we end up in a
+ * deadlock on the extent buffer (trying to read lock it when we
+ * previously write locked it).
+ *
+ * It's safe to read the inode item using the commit root because
+ * block groups, once loaded, stay in memory forever (until they are
+ * removed) as well as their space caches once loaded. New block groups
+ * once created get their ->cached field set to BTRFS_CACHE_FINISHED so
+ * we will never try to read their inode item while the fs is mounted.
+ */
inode = lookup_free_space_inode(fs_info, block_group, path);
if (IS_ERR(inode)) {
btrfs_free_path(path);
}
btrfs_release_path(path);
- if (cur_offset <= end && cow_start == (u64)-1) {
+ if (cur_offset <= end && cow_start == (u64)-1)
cow_start = cur_offset;
- cur_offset = end;
- }
if (cow_start != (u64)-1) {
+ cur_offset = end;
ret = cow_file_range(inode, locked_page, cow_start, end, end,
page_started, nr_written, 1, NULL);
if (ret)
/*
* read an inode from the btree into the in-memory inode
*/
-static int btrfs_read_locked_inode(struct inode *inode)
+static int btrfs_read_locked_inode(struct inode *inode,
+ struct btrfs_path *in_path)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
- struct btrfs_path *path;
+ struct btrfs_path *path = in_path;
struct extent_buffer *leaf;
struct btrfs_inode_item *inode_item;
struct btrfs_root *root = BTRFS_I(inode)->root;
if (!ret)
filled = true;
- path = btrfs_alloc_path();
- if (!path)
- return -ENOMEM;
+ if (!path) {
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+ }
memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
if (ret) {
- btrfs_free_path(path);
+ if (path != in_path)
+ btrfs_free_path(path);
return ret;
}
btrfs_ino(BTRFS_I(inode)),
root->root_key.objectid, ret);
}
- btrfs_free_path(path);
+ if (path != in_path)
+ btrfs_free_path(path);
if (!maybe_acls)
cache_no_acl(inode);
/* Get an inode object given its location and corresponding root.
* Returns in *is_new if the inode was read from disk
*/
-struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
- struct btrfs_root *root, int *new)
+struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new,
+ struct btrfs_path *path)
{
struct inode *inode;
if (inode->i_state & I_NEW) {
int ret;
- ret = btrfs_read_locked_inode(inode);
+ ret = btrfs_read_locked_inode(inode, path);
if (!ret) {
inode_tree_add(inode);
unlock_new_inode(inode);
return inode;
}
+struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
+ struct btrfs_root *root, int *new)
+{
+ return btrfs_iget_path(s, location, root, new, NULL);
+}
+
static struct inode *new_simple_dir(struct super_block *s,
struct btrfs_key *key,
struct btrfs_root *root)
const u64 sz = BTRFS_I(src)->root->fs_info->sectorsize;
len = round_down(i_size_read(src), sz) - loff;
+ if (len == 0)
+ return 0;
olen = len;
}
}
goto out_unlock;
if (len == 0)
olen = len = src->i_size - off;
- /* if we extend to eof, continue to block boundary */
- if (off + len == src->i_size)
+ /*
+ * If we extend to eof, continue to block boundary if and only if the
+ * destination end offset matches the destination file's size, otherwise
+ * we would be corrupting data by placing the eof block into the middle
+ * of a file.
+ */
+ if (off + len == src->i_size) {
+ if (!IS_ALIGNED(len, bs) && destoff + len < inode->i_size)
+ goto out_unlock;
len = ALIGN(src->i_size, bs) - off;
+ }
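The extra alignment checks above lean on the kernel's power-of-two helpers. A small userspace rendition (ALIGN_UP mirrors the kernel's ALIGN macro; bs plays the filesystem block size):

#include <stdint.h>
#include <stdio.h>

/* round x up to the next multiple of a; a must be a power of two */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((uint64_t)(a) - 1))
/* true if x is already a multiple of a */
#define IS_ALIGNED(x, a)	(((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
	uint64_t i_size = 10000, bs = 4096, off = 0;

	/* extending to EOF rounds the source size up to a block boundary,
	 * as "len = ALIGN(src->i_size, bs) - off" does above */
	printf("len=%llu aligned=%d\n",
	       (unsigned long long)(ALIGN_UP(i_size, bs) - off),
	       (int)IS_ALIGNED(i_size, bs));	/* len=12288 aligned=0 */
	return 0;
}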
if (len == 0) {
ret = 0;
}
/* Used to sort the devices by max_avail(descending sort) */
-static int btrfs_cmp_device_free_bytes(const void *dev_info1,
+static inline int btrfs_cmp_device_free_bytes(const void *dev_info1,
const void *dev_info2)
{
if (((struct btrfs_device_info *)dev_info1)->max_avail >
* The helper to calc the free space on the devices that can be used to store
* file data.
*/
-static int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
- u64 *free_bytes)
+static inline int btrfs_calc_avail_data_space(struct btrfs_fs_info *fs_info,
+ u64 *free_bytes)
{
struct btrfs_device_info *devices_info;
struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
type != (BTRFS_BLOCK_GROUP_METADATA |
BTRFS_BLOCK_GROUP_DATA)) {
block_group_err(fs_info, leaf, slot,
-"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llu or 0x%llx",
+"invalid type, have 0x%llx (%lu bits set) expect either 0x%llx, 0x%llx, 0x%llx or 0x%llx",
type, hweight64(type),
BTRFS_BLOCK_GROUP_DATA, BTRFS_BLOCK_GROUP_METADATA,
BTRFS_BLOCK_GROUP_SYSTEM,
logged_end = end;
list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
+ /*
+ * Skip extents outside our logging range. It's important to do
+ * it for correctness because if we don't ignore them, we may
+ * log them before their ordered extent completes, and therefore
+ * we could log them without logging their respective checksums
+ * (the checksum items are added to the csum tree at the very
+ * end of btrfs_finish_ordered_io()). Also leave such extents
+ * outside of our range in the list, since we may have another
+ * ranged fsync in the near future that needs them. If an extent
+ * outside our range corresponds to a hole, log it to avoid
+ * leaving gaps between extents (fsck will complain when we are
+ * not using the NO_HOLES feature).
+ */
+ if ((em->start > end || em->start + em->len <= start) &&
+ em->block_start != EXTENT_MAP_HOLE)
+ continue;
+
list_del_init(&em->list);
/*
* Just an arbitrary number, this can be really CPU intensive
if (!prealloc_cf)
return -ENOMEM;
- /* Start by sync'ing the source file */
+ /* Start by sync'ing the source and destination files */
ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
- if (ret < 0)
+ if (ret < 0) {
+ dout("failed to write src file (%zd)\n", ret);
+ goto out;
+ }
+ ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
+ if (ret < 0) {
+ dout("failed to write dst file (%zd)\n", ret);
goto out;
+ }
/*
* We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
info->symlink = *p;
*p += info->symlink_len;
- if (features & CEPH_FEATURE_DIRLAYOUTHASH)
- ceph_decode_copy_safe(p, end, &info->dir_layout,
- sizeof(info->dir_layout), bad);
- else
- memset(&info->dir_layout, 0, sizeof(info->dir_layout));
-
+ ceph_decode_copy_safe(p, end, &info->dir_layout,
+ sizeof(info->dir_layout), bad);
ceph_decode_32_safe(p, end, info->xattr_len, bad);
ceph_decode_need(p, end, info->xattr_len, bad);
info->xattr_data = *p;
recon_state.pagelist = pagelist;
if (session->s_con.peer_features & CEPH_FEATURE_MDSENC)
recon_state.msg_version = 3;
- else if (session->s_con.peer_features & CEPH_FEATURE_FLOCK)
- recon_state.msg_version = 2;
else
- recon_state.msg_version = 1;
+ recon_state.msg_version = 2;
err = iterate_session_caps(session, encode_caps_cb, &recon_state);
if (err < 0)
goto fail;
ceph_put_snap_realm(mdsc, realm);
realm = next;
}
- ceph_put_snap_realm(mdsc, realm);
+ if (realm)
+ ceph_put_snap_realm(mdsc, realm);
up_read(&mdsc->snap_rwsem);
return exceeded;
{
int err = 0;
- if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+ if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb)))) {
+ put_bh(iloc->bh);
return -EIO;
-
+ }
if (IS_I_VERSION(inode))
inode_inc_iversion(inode);
if (!is_dx_block && type == INDEX) {
ext4_error_inode(inode, func, line, block,
"directory leaf block found instead of index block");
+ brelse(bh);
return ERR_PTR(-EFSCORRUPTED);
}
if (!ext4_has_metadata_csum(inode->i_sb) ||
list_del_init(&EXT4_I(inode)->i_orphan);
mutex_unlock(&sbi->s_orphan_lock);
}
- }
+ } else
+ brelse(iloc.bh);
+
jbd_debug(4, "superblock will point to %lu\n", inode->i_ino);
jbd_debug(4, "orphan inode %lu will point to %d\n",
inode->i_ino, NEXT_ORPHAN(inode));
BUFFER_TRACE(bh, "get_write_access");
err = ext4_journal_get_write_access(handle, bh);
- if (err)
+ if (err) {
+ brelse(bh);
return err;
+ }
ext4_debug("mark block bitmap %#04llx (+%llu/%u)\n",
first_cluster, first_cluster - start, count2);
ext4_set_bits(bh->b_data, first_cluster - start, count2);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (unlikely(err))
return err;
- brelse(bh);
}
return 0;
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
overhead = ext4_group_overhead_blocks(sb, group);
ext4_mark_bitmap_end(EXT4_B2C(sbi, group_data[i].blocks_count),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
handle_ib:
if (bg_flags[i] & EXT4_BG_INODE_UNINIT)
bh = bclean(handle, sb, block);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
- bh = NULL;
goto out;
}
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
sb->s_blocksize * 8, bh->b_data);
err = ext4_handle_dirty_metadata(handle, NULL, bh);
+ brelse(bh);
if (err)
goto out;
- brelse(bh);
}
- bh = NULL;
/* Mark group tables in block bitmap */
for (j = 0; j < GROUP_TABLE_COUNT; j++) {
}
out:
- brelse(bh);
err2 = ext4_journal_stop(handle);
if (err2 && !err)
err = err2;
err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
if (unlikely(err)) {
ext4_std_error(sb, err);
+ iloc.bh = NULL;
goto exit_inode;
}
brelse(dind);
sizeof(struct buffer_head *),
GFP_NOFS);
if (!n_group_desc) {
+ brelse(gdb_bh);
err = -ENOMEM;
ext4_warning(sb, "not enough memory for %lu groups",
gdb_num + 1);
kvfree(o_group_desc);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
- if (unlikely(err))
- brelse(gdb_bh);
return err;
}
backup_block, backup_block -
ext4_group_first_block_no(sb, group));
BUFFER_TRACE(bh, "get_write_access");
- if ((err = ext4_journal_get_write_access(handle, bh)))
+ if ((err = ext4_journal_get_write_access(handle, bh))) {
+ brelse(bh);
break;
+ }
lock_buffer(bh);
memcpy(bh->b_data, data, size);
if (rest)
err = ext4_alloc_flex_bg_array(sb, n_group + 1);
if (err)
- return err;
+ goto out;
err = ext4_mb_alloc_groupinfo(sb, n_group + 1);
if (err)
n_blocks_count_retry = 0;
free_flex_gd(flex_gd);
flex_gd = NULL;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
goto retry;
}
sbi->s_groups_count = blocks_count;
sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
+ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+ le32_to_cpu(es->s_inodes_count)) {
+ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+ le32_to_cpu(es->s_inodes_count),
+ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+ ret = -EINVAL;
+ goto failed_mount;
+ }
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
ret = -ENOMEM;
goto failed_mount;
}
- if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
- le32_to_cpu(es->s_inodes_count)) {
- ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
- le32_to_cpu(es->s_inodes_count),
- ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
- ret = -EINVAL;
- goto failed_mount;
- }
bgl_lock_init(sbi->s_blockgroup_lock);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
+ percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
failed_mount5:
ext4_ext_release(sb);
ext4_release_system_zone(sb);
inode_lock(ea_inode);
ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
- if (ret) {
- iloc.bh = NULL;
+ if (ret)
goto out;
- }
ref_count = ext4_xattr_inode_get_ref(ea_inode);
ref_count += ref_change;
}
ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
- iloc.bh = NULL;
if (ret)
ext4_warning_inode(ea_inode,
"ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
- brelse(iloc.bh);
inode_unlock(ea_inode);
return ret;
}
bh = ext4_getblk(handle, ea_inode, block, 0);
if (IS_ERR(bh))
return PTR_ERR(bh);
+ if (!bh) {
+ WARN_ON_ONCE(1);
+ EXT4_ERROR_INODE(ea_inode,
+ "ext4_getblk() return bh = NULL");
+ return -EFSCORRUPTED;
+ }
ret = ext4_journal_get_write_access(handle, bh);
if (ret)
goto out;
if (!bh)
return ERR_PTR(-EIO);
error = ext4_xattr_check_block(inode, bh);
- if (error)
+ if (error) {
+ brelse(bh);
return ERR_PTR(error);
+ }
return bh;
}
error = ext4_xattr_block_set(handle, inode, &i, &bs);
} else if (error == -ENOSPC) {
if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
+ brelse(bs.bh);
+ bs.bh = NULL;
error = ext4_xattr_block_find(inode, &i, &bs);
if (error)
goto cleanup;
kfree(buffer);
if (is)
brelse(is->iloc.bh);
+ if (bs)
+ brelse(bs->bh);
kfree(is);
kfree(bs);
struct ext4_inode *raw_inode, handle_t *handle)
{
struct ext4_xattr_ibody_header *header;
- struct buffer_head *bh;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
static unsigned int mnt_count;
size_t min_offs;
* EA block can hold new_extra_isize bytes.
*/
if (EXT4_I(inode)->i_file_acl) {
+ struct buffer_head *bh;
+
bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
error = -EIO;
if (!bh)
goto cleanup;
error = ext4_xattr_check_block(inode, bh);
- if (error)
+ if (error) {
+ brelse(bh);
goto cleanup;
+ }
base = BHDR(bh);
end = bh->b_data + bh->b_size;
min_offs = end - base;
namespace_lock();
lock_mount_hash();
- event++;
+ /* Recheck MNT_LOCKED with the locks held */
+ retval = -EINVAL;
+ if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ goto out;
+
+ event++;
if (flags & MNT_DETACH) {
if (!list_empty(&mnt->mnt_list))
umount_tree(mnt, UMOUNT_PROPAGATE);
retval = 0;
}
}
+out:
unlock_mount_hash();
namespace_unlock();
return retval;
goto dput_and_out;
if (!check_mnt(mnt))
goto dput_and_out;
- if (mnt->mnt.mnt_flags & MNT_LOCKED)
+ if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */
goto dput_and_out;
retval = -EPERM;
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
for (s = r; s; s = next_mnt(s, r)) {
if (!(flag & CL_COPY_UNBINDABLE) &&
IS_MNT_UNBINDABLE(s)) {
- s = skip_mnt_tree(s);
- continue;
+ if (s->mnt.mnt_flags & MNT_LOCKED) {
+ /* Both unbindable and locked. */
+ q = ERR_PTR(-EPERM);
+ goto out;
+ } else {
+ s = skip_mnt_tree(s);
+ continue;
+ }
}
if (!(flag & CL_COPY_MNT_NS_FILE) &&
is_mnt_ns_file(s->mnt.mnt_root)) {
{
namespace_lock();
lock_mount_hash();
- umount_tree(real_mount(mnt), UMOUNT_SYNC);
+ umount_tree(real_mount(mnt), 0);
unlock_mount_hash();
namespace_unlock();
}
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr_leaf_entry *entries;
- uint16_t end;
+ uint32_t end; /* must be 32bit - see below */
int i;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
/*
* Quickly check the freemap information. Attribute data has to be
* aligned to 4-byte boundaries, and likewise for the free space.
+ *
+ * Note that for 64k block size filesystems, the freemap entries cannot
+ * overflow as they are only be16 fields. However, when checking the end
+ * pointer of the freemap, we have to be careful to detect overflows and
+ * so use uint32_t for those checks.
*/
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
return __this_address;
if (ichdr.freemap[i].size & 0x3)
return __this_address;
- end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+
+ /* be careful of 16-bit overflows here */
+ end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
if (end < ichdr.freemap[i].base)
return __this_address;
if (end > mp->m_attr_geo->blksize)
error = 0;
out_free_buf:
kmem_free(buf);
- return 0;
+ return error;
}
struct getfsmap_info {
void
xfs_hex_dump(void *p, int length)
{
- print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_ADDRESS, 16, 1, p, length, 1);
+ print_hex_dump(KERN_ALERT, "", DUMP_PREFIX_OFFSET, 16, 1, p, length, 1);
}
#define _4LEVEL_FIXUP_H
#define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
#define PUD_SHIFT PGDIR_SHIFT
#define PUD_SIZE PGDIR_SIZE
#define _5LEVEL_FIXUP_H
#define __ARCH_HAS_5LEVEL_HACK
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
#define P4D_SHIFT PGDIR_SHIFT
#define P4D_SIZE PGDIR_SIZE
#ifndef __ASSEMBLY__
#include <asm-generic/5level-fixup.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a pgd gets the size right, and allows
#ifndef __ASSEMBLY__
-#define __PAGETABLE_P4D_FOLDED
+#define __PAGETABLE_P4D_FOLDED 1
typedef struct { pgd_t pgd; } p4d_t;
struct mm_struct;
-#define __PAGETABLE_PMD_FOLDED
+#define __PAGETABLE_PMD_FOLDED 1
/*
* Having the pmd type consist of a pud gets the size right, and allows
#else
#include <asm-generic/pgtable-nop4d.h>
-#define __PAGETABLE_PUD_FOLDED
+#define __PAGETABLE_PUD_FOLDED 1
/*
* Having the pud type consist of a p4d gets the size right, and allows
#endif
#endif
+/*
+ * On some architectures, whether the p4d/pud or pmd layer of the page
+ * table hierarchy is folded depends on the mm.
+ */
+#ifndef mm_p4d_folded
+#define mm_p4d_folded(mm) __is_defined(__PAGETABLE_P4D_FOLDED)
+#endif
+
+#ifndef mm_pud_folded
+#define mm_pud_folded(mm) __is_defined(__PAGETABLE_PUD_FOLDED)
+#endif
+
+#ifndef mm_pmd_folded
+#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
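The switch above from empty definitions to "#define ... 1" is what makes
these defaults work: __is_defined() (from include/linux/kconfig.h) evaluates
to 1 only when the tested macro expands to exactly 1, and to 0 for an empty
definition. A minimal standalone sketch, with the helper macro bodies copied
from kconfig.h:

	#define __ARG_PLACEHOLDER_1 0,
	#define __take_second_arg(__ignored, val, ...) val
	#define __is_defined(x)			___is_defined(x)
	#define ___is_defined(val)		____is_defined(__ARG_PLACEHOLDER_##val)
	#define ____is_defined(arg1_or_junk)	__take_second_arg(arg1_or_junk 1, 0)

	#define FOLDED_OLD			/* empty definition */
	#define FOLDED_NEW 1

	int old_style = __is_defined(FOLDED_OLD);	/* 0 - silently wrong */
	int new_style = __is_defined(FOLDED_NEW);	/* 1 - as intended */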
CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
CEPH_FEATURE_CEPHX_V2)
-#define CEPH_FEATURES_REQUIRED_DEFAULT \
- (CEPH_FEATURE_NOSRCADDR | \
- CEPH_FEATURE_SUBSCRIBE2 | \
- CEPH_FEATURE_RECONNECT_SEQ | \
- CEPH_FEATURE_PGID64 | \
- CEPH_FEATURE_PGPOOL3 | \
- CEPH_FEATURE_OSDENC)
+#define CEPH_FEATURES_REQUIRED_DEFAULT 0
#endif
#define KASAN_ABI_VERSION 3
#endif
-/*
- * Because __no_sanitize_address conflicts with inlining:
- * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
- * we do one or the other.
- */
-#ifdef CONFIG_KASAN
-#define __no_sanitize_address_or_inline \
- __no_sanitize_address __maybe_unused notrace
-#else
-#define __no_sanitize_address_or_inline inline
-#endif
-
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
* https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
* '__maybe_unused' allows us to avoid defined-but-not-used warnings.
*/
-# define __no_kasan_or_inline __no_sanitize_address __maybe_unused
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
#else
# define __no_kasan_or_inline __always_inline
#endif
/*
* The attributes in this file are unconditionally defined and they directly
- * map to compiler attribute(s) -- except those that are optional.
+ * map to compiler attribute(s), unless one of the compilers does not support
+ * the attribute. In that case, __has_attribute is used to check for support
+ * and the reason is stated in its comment ("Optional: ...").
*
* Any other "attributes" (i.e. those that depend on a configuration option,
* on a compiler, on an architecture, on plugins, on other attributes...)
* should be defined elsewhere (e.g. compiler_types.h or compiler-*.h).
+ * The intention is to keep this file as simple as possible, as well as
+ * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks).
*
* This file is meant to be sorted (by actual attribute name,
* not by #define identifier). Use the __attribute__((__name__)) syntax
* (i.e. with underscores) to avoid future collisions with other macros.
- * If an attribute is optional, state the reason in the comment.
+ * Provide links to the documentation of each supported compiler, if it exists.
*/
/*
- * To check for optional attributes, we use __has_attribute, which is supported
- * on gcc >= 5, clang >= 2.9 and icc >= 17. In the meantime, to support
- * 4.6 <= gcc < 5, we implement __has_attribute by hand.
+ * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17.
+ * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute
+ * by hand.
*
* sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__
* depending on the compiler used to build it; however, these attributes have
# define randomized_struct_fields_end
#endif
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
/* Are two types/vars the same type (ignoring qualifiers)? */
#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
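The asm_volatile_goto() fallback above maps straight to asm goto; the
indirection exists because compiler-specific headers (compiler-gcc.h) carry
their own definition that works around an old gcc asm-goto miscompilation.
A hedged x86-64 sketch of the construct being wrapped - asm goto permits no
output operands, only inputs, clobbers and target labels (the helper itself
is hypothetical):

	static inline bool bit_test(const unsigned long *addr, int bit)
	{
		asm_volatile_goto("btl %1, %0\n\t"
				  "jc %l[found]"
				  : /* no outputs permitted */
				  : "m" (*addr), "Ir" (bit)
				  : "cc"
				  : found);
		return false;
	found:
		return true;
	}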
* input will not be passed to raw_event unless hid_device_io_start is
* called.
*
- * raw_event and event should return 0 on no action performed, 1 when no
- * further processing should be done and negative on error
+ * raw_event and event should return a negative value on error; any other
+ * value passes the event on to .event(). Typically 0 is returned for
+ * success.
*
* input_mapping shall return a negative value to completely ignore this usage
* (e.g. doubled or invalid usage), zero to continue with parsing of this
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
extern raw_spinlock_t i8253_lock;
+extern bool i8253_clear_counter_on_shutdown;
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
static inline void mm_inc_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_puds(struct mm_struct *mm)
{
+ if (mm_pud_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
}
#endif
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
+ if (mm_pmd_folded(mm))
+ return;
atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
}
#endif
*/
static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
{
- return (u64)nand->memorg.luns_per_target *
- nand->memorg.eraseblocks_per_lun *
- nand->memorg.pages_per_eraseblock;
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
}
/**
}
/**
- * nanddev_pos_next_eraseblock() - Move a position to the next page
+ * nanddev_pos_next_page() - Move a position to the next page
* @nand: NAND device
* @pos: the position to update
*
#endif
}
+/* Variant of netdev_tx_sent_queue() for drivers that are aware
+ * that they should not test BQL status themselves.
+ * We do want to change __QUEUE_STATE_STACK_XOFF only for the last
+ * skb of a batch.
+ * Returns true if the doorbell must be used to kick the NIC.
+ */
+static inline bool __netdev_tx_sent_queue(struct netdev_queue *dev_queue,
+ unsigned int bytes,
+ bool xmit_more)
+{
+ if (xmit_more) {
+#ifdef CONFIG_BQL
+ dql_queued(&dev_queue->dql, bytes);
+#endif
+ return netif_tx_queue_stopped(dev_queue);
+ }
+ netdev_tx_sent_queue(dev_queue, bytes);
+ return true;
+}
+
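A hypothetical driver Tx path sketching how the helper above is meant to be
used - every mydrv_* name is illustrative, not a real API:

	static netdev_tx_t mydrv_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		mydrv_post_descriptor(skb);		/* hypothetical hw work */

		/* Accounts the bytes in BQL; returns true when the doorbell
		 * must ring now (end of a batch, or the queue was stopped).
		 */
		if (__netdev_tx_sent_queue(txq, skb->len, skb->xmit_more))
			mydrv_ring_doorbell(dev);	/* hypothetical */

		return NETDEV_TX_OK;
	}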
/**
* netdev_sent_queue - report the number of bytes queued to hardware
* @dev: network device
extern ip_set_id_t ip_set_get_byname(struct net *net,
const char *name, struct ip_set **set);
extern void ip_set_put_byindex(struct net *net, ip_set_id_t index);
-extern const char *ip_set_name_byindex(struct net *net, ip_set_id_t index);
+extern void ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name);
extern ip_set_id_t ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index);
extern void ip_set_nfnl_put(struct net *net, ip_set_id_t index);
rcu_assign_pointer(comment->c, c);
}
-/* Used only when dumping a set, protected by rcu_read_lock_bh() */
+/* Used only when dumping a set, protected by rcu_read_lock() */
static inline int
ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
{
- struct ip_set_comment_rcu *c = rcu_dereference_bh(comment->c);
+ struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
if (!c)
return 0;
void watchdog_nmi_stop(void);
void watchdog_nmi_start(void);
int watchdog_nmi_probe(void);
+int watchdog_nmi_enable(unsigned int cpu);
+void watchdog_nmi_disable(unsigned int cpu);
/**
* touch_nmi_watchdog - restart NMI watchdog timeout.
const struct in6_addr *addr);
bool ipv6_chk_acast_addr_src(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
+int ipv6_anycast_init(void);
+void ipv6_anycast_cleanup(void);
/* Device notifier */
int register_inet6addr_notifier(struct notifier_block *nb);
struct in6_addr aca_addr;
struct fib6_info *aca_rt;
struct ifacaddr6 *aca_next;
+ struct hlist_node aca_addr_lst;
int aca_users;
refcount_t aca_refcnt;
unsigned long aca_cstamp;
unsigned long aca_tstamp;
+ struct rcu_head rcu;
};
#define IFA_HOST IPV6_ADDR_LOOPBACK
const char *fmt, ...) { }
#endif /* CONFIG_SYSCTL */
+static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.generic;
+}
+
+static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.tcp;
+}
+
+static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.udp;
+}
+
+static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmp;
+}
+
+static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.icmpv6;
+}
+
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.dccp;
+}
+#endif
+
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
+{
+ return &net->ct.nf_ct_proto.sctp;
+}
+#endif
+
#endif /*_NF_CONNTRACK_PROTOCOL_H*/
};
struct kfd_ioctl_get_queue_wave_state_args {
- uint64_t ctl_stack_address; /* to KFD */
- uint32_t ctl_stack_used_size; /* from KFD */
- uint32_t save_area_used_size; /* from KFD */
- uint32_t queue_id; /* to KFD */
- uint32_t pad;
+ __u64 ctl_stack_address; /* to KFD */
+ __u32 ctl_stack_used_size; /* from KFD */
+ __u32 save_area_used_size; /* from KFD */
+ __u32 queue_id; /* to KFD */
+ __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
/* hw exception data */
struct kfd_hsa_hw_exception_data {
- uint32_t reset_type;
- uint32_t reset_cause;
- uint32_t memory_lost;
- uint32_t gpu_id;
+ __u32 reset_type;
+ __u32 reset_cause;
+ __u32 memory_lost;
+ __u32 gpu_id;
};
/* Event data */
NFTA_NG_MODULUS,
NFTA_NG_TYPE,
NFTA_NG_OFFSET,
- NFTA_NG_SET_NAME,
- NFTA_NG_SET_ID,
+ NFTA_NG_SET_NAME, /* deprecated */
+ NFTA_NG_SET_ID, /* deprecated */
__NFTA_NG_MAX
};
#define NFTA_NG_MAX (__NFTA_NG_MAX - 1)
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
+#ifndef __KERNEL__
+#include <limits.h> /* for INT_MIN, INT_MAX */
+#endif
+
/* Bridge Hooks */
/* After promisc drops, checksum checks. */
#define NF_BR_PRE_ROUTING 0
#define SCTP_ASSOC_CHANGE_DENIED 0x0004
#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+#define SCTP_STREAM_CHANGE_DENIED SCTP_ASSOC_CHANGE_DENIED
+#define SCTP_STREAM_CHANGE_FAILED SCTP_ASSOC_CHANGE_FAILED
struct sctp_stream_change_event {
__u16 strchange_type;
__u16 strchange_flags;
/* SCTP Stream schedulers */
enum sctp_sched_type {
SCTP_SS_FCFS,
+ SCTP_SS_DEFAULT = SCTP_SS_FCFS,
SCTP_SS_PRIO,
SCTP_SS_RR,
SCTP_SS_MAX = SCTP_SS_RR
extern unsigned long *xen_contiguous_bitmap;
-#ifdef CONFIG_XEN_PV
+#if defined(CONFIG_XEN_PV) || defined(CONFIG_ARM) || defined(CONFIG_ARM64)
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
-
-int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
- xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
- unsigned int domid, bool no_translate, struct page **pages);
#else
static inline int xen_create_contiguous_region(phys_addr_t pstart,
unsigned int order,
static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
unsigned int order) { }
+#endif
+#if defined(CONFIG_XEN_PV)
+int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
+ xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot,
+ unsigned int domid, bool no_translate, struct page **pages);
+#else
static inline int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr,
xen_pfn_t *pfn, int nr, int *err_ptr,
pgprot_t prot, unsigned int domid,
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym)
{
- unsigned long symbol_start, symbol_end;
struct bpf_prog_aux *aux;
unsigned int it = 0;
int ret = -ERANGE;
if (it++ != symnum)
continue;
- bpf_get_prog_addr_region(aux->prog, &symbol_start, &symbol_end);
bpf_get_prog_name(aux->prog, sym);
- *value = symbol_start;
+ *value = (unsigned long)aux->prog->bpf_func;
*type = BPF_SYM_ELF_TYPE;
ret = 0;
info.jited_prog_len = 0;
info.xlated_prog_len = 0;
info.nr_jited_ksyms = 0;
+ info.nr_jited_func_lens = 0;
goto done;
}
}
ulen = info.nr_jited_ksyms;
- info.nr_jited_ksyms = prog->aux->func_cnt;
+ info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
if (info.nr_jited_ksyms && ulen) {
if (bpf_dump_raw_ok()) {
+ unsigned long ksym_addr;
u64 __user *user_ksyms;
- ulong ksym_addr;
u32 i;
/* copy the address of the kernel symbol
*/
ulen = min_t(u32, info.nr_jited_ksyms, ulen);
user_ksyms = u64_to_user_ptr(info.jited_ksyms);
- for (i = 0; i < ulen; i++) {
- ksym_addr = (ulong) prog->aux->func[i]->bpf_func;
- ksym_addr &= PAGE_MASK;
- if (put_user((u64) ksym_addr, &user_ksyms[i]))
+ if (prog->aux->func_cnt) {
+ for (i = 0; i < ulen; i++) {
+ ksym_addr = (unsigned long)
+ prog->aux->func[i]->bpf_func;
+ if (put_user((u64) ksym_addr,
+ &user_ksyms[i]))
+ return -EFAULT;
+ }
+ } else {
+ ksym_addr = (unsigned long) prog->bpf_func;
+ if (put_user((u64) ksym_addr, &user_ksyms[0]))
return -EFAULT;
}
} else {
}
ulen = info.nr_jited_func_lens;
- info.nr_jited_func_lens = prog->aux->func_cnt;
+ info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
if (info.nr_jited_func_lens && ulen) {
if (bpf_dump_raw_ok()) {
u32 __user *user_lens;
/* copy the JITed image lengths for each function */
ulen = min_t(u32, info.nr_jited_func_lens, ulen);
user_lens = u64_to_user_ptr(info.jited_func_lens);
- for (i = 0; i < ulen; i++) {
- func_len = prog->aux->func[i]->jited_len;
- if (put_user(func_len, &user_lens[i]))
+ if (prog->aux->func_cnt) {
+ for (i = 0; i < ulen; i++) {
+ func_len =
+ prog->aux->func[i]->jited_len;
+ if (put_user(func_len, &user_lens[i]))
+ return -EFAULT;
+ }
+ } else {
+ func_len = prog->jited_len;
+ if (put_user(func_len, &user_lens[0]))
return -EFAULT;
}
} else {
EXPORT_SYMBOL(release_resource);
/**
- * Finds the lowest iomem resource that covers part of [start..end]. The
- * caller must specify start, end, flags, and desc (which may be
+ * Finds the lowest iomem resource that covers part of [@start..@end]. The
+ * caller must specify @start, @end, @flags, and @desc (which may be
* IORES_DESC_NONE).
*
- * If a resource is found, returns 0 and *res is overwritten with the part
- * of the resource that's within [start..end]; if none is found, returns
- * -1.
+ * If a resource is found, returns 0 and @*res is overwritten with the part
+ * of the resource that's within [@start..@end]; if none is found, returns
+ * -1. Returns -EINVAL for invalid parameters.
*
* This function walks the whole tree and not just first level children
* unless @first_lvl is true.
+ *
+ * @start: start address of the resource searched for
+ * @end: end address of same resource
+ * @flags: flags which the resource must have
+ * @desc: descriptor the resource must have
+ * @first_lvl: walk only the first level children, if set
+ * @res: return ptr, if resource found
*/
static int find_next_iomem_res(resource_size_t start, resource_size_t end,
unsigned long flags, unsigned long desc,
* @flags: I/O resource flags
* @start: start addr
* @end: end addr
+ * @arg: function argument for the callback @func
+ * @func: callback function that is called for each qualifying resource area
*
* NOTE: For a new descriptor search, define a new IORES_DESC in
* <linux/ioport.h> and set it in 'desc' of a target resource entry.
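A hedged sketch of the callback contract documented above, counting the
"System RAM" ranges below 4GiB (the function and flag names are real - the
flags match what walk_system_ram_res() uses - but the use case is
illustrative):

	static int count_ram_res(struct resource *res, void *arg)
	{
		unsigned int *count = arg;

		(*count)++;
		return 0;	/* a non-zero return stops the walk */
	}

	static unsigned int count_low_ram(void)
	{
		unsigned int count = 0;

		walk_iomem_res_desc(IORES_DESC_NONE,
				    IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
				    0, SZ_4G - 1, &count, count_ram_res);
		return count;
	}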
/*
* There's no userspace yet to cause hotplug operations; hence all the
* CPU masks are stable and all blatant races in the below code cannot
- * happen.
+ * happen. The hotplug lock is nevertheless taken to satisfy lockdep,
+ * but there won't be any contention on it.
*/
+ cpus_read_lock();
mutex_lock(&sched_domains_mutex);
sched_init_domains(cpu_active_mask);
mutex_unlock(&sched_domains_mutex);
+ cpus_read_unlock();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_FLAG_DOMAIN)) < 0)
local = 1;
/*
- * Retry task to preferred node migration periodically, in case it
- * case it previously failed, or the scheduler moved us.
+	 * Periodically retry migrating the task to its preferred node, in
+	 * case it previously failed or the scheduler moved us.
*/
if (time_after(jiffies, p->numa_migrate_retry)) {
task_numa_placement(p);
struct task_cputime cputime;
unsigned long soft;
- if (dl_task(tsk))
- check_dl_overrun(tsk);
-
/*
* If cputimer is not running, then there are no active
* process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
if (code[1].op != FETCH_OP_IMM)
return -EINVAL;
- tmp = strpbrk("+-", code->data);
+ tmp = strpbrk(code->data, "+-");
if (tmp)
c = *tmp;
ret = traceprobe_split_symbol_offset(code->data,
if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
goto out;
- ret = sort_idmaps(&new_map);
- if (ret < 0)
- goto out;
-
ret = -EPERM;
/* Map the lower ids from the parent user namespace to the
* kernel global id space.
e->lower_first = lower_first;
}
+ /*
+ * If we want to use binary search for lookup, this clones the extent
+ * array and sorts both copies.
+ */
+ ret = sort_idmaps(&new_map);
+ if (ret < 0)
+ goto out;
+
/* Install the map */
if (new_map.nr_extents <= UID_GID_MAP_MAX_BASE_EXTENTS) {
memcpy(map->extent, new_map.extent,
CFLAGS += -I../../../arch/arm/include -mfpu=neon
HAS_NEON = yes
endif
-ifeq ($(ARCH),arm64)
+ifeq ($(ARCH),aarch64)
CFLAGS += -I../../../arch/arm64/include
HAS_NEON = yes
endif
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX512=1)
else ifeq ($(HAS_NEON),yes)
- OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
+ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o recov_neon.o recov_neon_inner.o
CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
HAS_ALTIVEC := $(shell printf '\#include <altivec.h>\nvector int a;\n' |\
}
skb = next;
- if (netif_xmit_stopped(txq) && skb) {
+ if (netif_tx_queue_stopped(txq) && skb) {
rc = NETDEV_TX_BUSY;
break;
}
read_lock_bh(&idev->lock);
list_for_each_entry(ifp, &idev->addr_list, if_list) {
- if (ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL)
+ if (!!(ipv6_addr_type(&ifp->addr) & IPV6_ADDR_LINKLOCAL) !=
+ !!(ipv6_addr_type(&np->remote_ip.in6) & IPV6_ADDR_LINKLOCAL))
continue;
np->local_ip.in6 = ifp->addr;
err = 0;
cb->seq = 0;
}
ret = dumpit(skb, cb);
- if (ret < 0)
+ if (ret)
break;
}
cb->family = idx;
*
* This is a helper to do that correctly considering GSO_BY_FRAGS.
*
+ * @skb: GSO skb
+ *
* @seg_len: The segmented length (from skb_gso_*_seglen). In the
* GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
*
#ifdef CONFIG_INET
if (family == AF_INET &&
+ protocol != IPPROTO_RAW &&
!rcu_access_pointer(inet_protos[protocol]))
return -ENOENT;
#endif
if (ip_is_fragment(&iph)) {
skb = skb_share_check(skb, GFP_ATOMIC);
if (skb) {
- if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
- return skb;
- if (pskb_trim_rcsum(skb, netoff + len))
- return skb;
+ if (!pskb_may_pull(skb, netoff + iph.ihl * 4)) {
+ kfree_skb(skb);
+ return NULL;
+ }
+ if (pskb_trim_rcsum(skb, netoff + len)) {
+ kfree_skb(skb);
+ return NULL;
+ }
memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
if (ip_defrag(net, skb, user))
return NULL;
return -ENOPROTOOPT;
err = do_ip_setsockopt(sk, level, optname, optval, optlen);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_SET_REPLACE &&
optname < BPFILTER_IPT_SET_MAX)
err = bpfilter_ip_set_sockopt(sk, optname, optval, optlen);
int err;
err = do_ip_getsockopt(sk, level, optname, optval, optlen, 0);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
err = do_ip_getsockopt(sk, level, optname, optval, optlen,
MSG_CMSG_COMPAT);
-#ifdef CONFIG_BPFILTER
+#if IS_ENABLED(CONFIG_BPFILTER_UMH)
if (optname >= BPFILTER_IPT_SO_GET_INFO &&
optname < BPFILTER_IPT_GET_MAX)
err = bpfilter_ip_get_sockopt(sk, optname, optval, optlen);
err = ip6_flowlabel_init();
if (err)
goto ip6_flowlabel_fail;
+ err = ipv6_anycast_init();
+ if (err)
+ goto ipv6_anycast_fail;
err = addrconf_init();
if (err)
goto addrconf_fail;
ipv6_exthdrs_fail:
addrconf_cleanup();
addrconf_fail:
+ ipv6_anycast_cleanup();
+ipv6_anycast_fail:
ip6_flowlabel_cleanup();
ip6_flowlabel_fail:
ndisc_late_cleanup();
#include <net/checksum.h>
+#define IN6_ADDR_HSIZE_SHIFT 8
+#define IN6_ADDR_HSIZE BIT(IN6_ADDR_HSIZE_SHIFT)
+/* anycast address hash table
+ */
+static struct hlist_head inet6_acaddr_lst[IN6_ADDR_HSIZE];
+static DEFINE_SPINLOCK(acaddr_hash_lock);
+
static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr);
+static u32 inet6_acaddr_hash(struct net *net, const struct in6_addr *addr)
+{
+ u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
+
+ return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
+}
+
/*
* socket join an anycast group
*/
rtnl_unlock();
}
+static void ipv6_add_acaddr_hash(struct net *net, struct ifacaddr6 *aca)
+{
+ unsigned int hash = inet6_acaddr_hash(net, &aca->aca_addr);
+
+ spin_lock(&acaddr_hash_lock);
+ hlist_add_head_rcu(&aca->aca_addr_lst, &inet6_acaddr_lst[hash]);
+ spin_unlock(&acaddr_hash_lock);
+}
+
+static void ipv6_del_acaddr_hash(struct ifacaddr6 *aca)
+{
+ spin_lock(&acaddr_hash_lock);
+ hlist_del_init_rcu(&aca->aca_addr_lst);
+ spin_unlock(&acaddr_hash_lock);
+}
+
static void aca_get(struct ifacaddr6 *aca)
{
refcount_inc(&aca->aca_refcnt);
}
+static void aca_free_rcu(struct rcu_head *h)
+{
+ struct ifacaddr6 *aca = container_of(h, struct ifacaddr6, rcu);
+
+ fib6_info_release(aca->aca_rt);
+ kfree(aca);
+}
+
static void aca_put(struct ifacaddr6 *ac)
{
if (refcount_dec_and_test(&ac->aca_refcnt)) {
- fib6_info_release(ac->aca_rt);
- kfree(ac);
+ call_rcu(&ac->rcu, aca_free_rcu);
}
}
aca->aca_addr = *addr;
fib6_info_hold(f6i);
aca->aca_rt = f6i;
+ INIT_HLIST_NODE(&aca->aca_addr_lst);
aca->aca_users = 1;
/* aca_tstamp should be updated upon changes */
aca->aca_cstamp = aca->aca_tstamp = jiffies;
aca_get(aca);
write_unlock_bh(&idev->lock);
+ ipv6_add_acaddr_hash(net, aca);
+
ip6_ins_rt(net, f6i);
addrconf_join_solict(idev->dev, &aca->aca_addr);
else
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
+ ipv6_del_acaddr_hash(aca);
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
idev->ac_list = aca->aca_next;
write_unlock_bh(&idev->lock);
+ ipv6_del_acaddr_hash(aca);
+
addrconf_leave_solict(idev, &aca->aca_addr);
ip6_del_rt(dev_net(idev->dev), aca->aca_rt);
bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr)
{
+ unsigned int hash = inet6_acaddr_hash(net, addr);
+ struct net_device *nh_dev;
+ struct ifacaddr6 *aca;
bool found = false;
rcu_read_lock();
if (dev)
found = ipv6_chk_acast_dev(dev, addr);
else
- for_each_netdev_rcu(net, dev)
- if (ipv6_chk_acast_dev(dev, addr)) {
+ hlist_for_each_entry_rcu(aca, &inet6_acaddr_lst[hash],
+ aca_addr_lst) {
+ nh_dev = fib6_info_nh_dev(aca->aca_rt);
+ if (!nh_dev || !net_eq(dev_net(nh_dev), net))
+ continue;
+ if (ipv6_addr_equal(&aca->aca_addr, addr)) {
found = true;
break;
}
+ }
rcu_read_unlock();
return found;
}
remove_proc_entry("anycast6", net->proc_net);
}
#endif
+
+/* Init / cleanup code
+ */
+int __init ipv6_anycast_init(void)
+{
+ int i;
+
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ INIT_HLIST_HEAD(&inet6_acaddr_lst[i]);
+ return 0;
+}
+
+void ipv6_anycast_cleanup(void)
+{
+ int i;
+
+ spin_lock(&acaddr_hash_lock);
+ for (i = 0; i < IN6_ADDR_HSIZE; i++)
+ WARN_ON(!hlist_empty(&inet6_acaddr_lst[i]));
+ spin_unlock(&acaddr_hash_lock);
+}
/* fib entries are never clones */
if (arg.filter.flags & RTM_F_CLONED)
- return skb->len;
+ goto out;
w = (void *)cb->args[2];
if (!w) {
tb = fib6_get_table(net, arg.filter.table_id);
if (!tb) {
if (arg.filter.dump_all_families)
- return skb->len;
+ goto out;
NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
return -ENOENT;
*/
ret = -EINPROGRESS;
if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
- fq->q.meat == fq->q.len &&
- nf_ct_frag6_reasm(fq, skb, dev))
- ret = 0;
- else
+ fq->q.meat == fq->q.len) {
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ if (nf_ct_frag6_reasm(fq, skb, dev))
+ ret = 0;
+ skb->_skb_refdst = orefdst;
+ } else {
skb_dst_drop(skb);
+ }
out_unlock:
spin_unlock_bh(&fq->q.lock);
MODULE_DESCRIPTION("core IP set support");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_IPSET);
-/* When the nfnl mutex is held: */
+/* When the nfnl mutex or ip_set_ref_lock is held: */
#define ip_set_dereference(p) \
- rcu_dereference_protected(p, lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+ rcu_dereference_protected(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+ lockdep_is_held(&ip_set_ref_lock))
#define ip_set(inst, id) \
ip_set_dereference((inst)->ip_set_list)[id]
+#define ip_set_ref_netlink(inst,id) \
+ rcu_dereference_raw((inst)->ip_set_list)[id]
/* The set types are implemented in modules and registered set types
* can be found in ip_set_type_list. Adding/deleting types is
EXPORT_SYMBOL_GPL(ip_set_put_byindex);
/* Get the name of a set behind a set index.
- * We assume the set is referenced, so it does exist and
- * can't be destroyed. The set cannot be renamed due to
- * the referencing either.
- *
+ * Set itself is protected by RCU, but its name isn't: to protect against
+ * renaming, grab ip_set_ref_lock as reader (see ip_set_rename()) and copy the
+ * name.
*/
-const char *
-ip_set_name_byindex(struct net *net, ip_set_id_t index)
+void
+ip_set_name_byindex(struct net *net, ip_set_id_t index, char *name)
{
- const struct ip_set *set = ip_set_rcu_get(net, index);
+ struct ip_set *set = ip_set_rcu_get(net, index);
BUG_ON(!set);
- BUG_ON(set->ref == 0);
- /* Referenced, so it's safe */
- return set->name;
+ read_lock_bh(&ip_set_ref_lock);
+ strncpy(name, set->name, IPSET_MAXNAMELEN);
+ read_unlock_bh(&ip_set_ref_lock);
}
EXPORT_SYMBOL_GPL(ip_set_name_byindex);
/* Wraparound */
goto cleanup;
- list = kcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
+ list = kvcalloc(i, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
goto cleanup;
/* nfnl mutex is held, both lists are valid */
/* Use new list */
index = inst->ip_set_max;
inst->ip_set_max = i;
- kfree(tmp);
+ kvfree(tmp);
ret = 0;
} else if (ret) {
goto cleanup;
if (!set)
return -ENOENT;
- read_lock_bh(&ip_set_ref_lock);
+ write_lock_bh(&ip_set_ref_lock);
if (set->ref != 0) {
ret = -IPSET_ERR_REFERENCED;
goto out;
strncpy(set->name, name2, IPSET_MAXNAMELEN);
out:
- read_unlock_bh(&ip_set_ref_lock);
+ write_unlock_bh(&ip_set_ref_lock);
return ret;
}
struct ip_set_net *inst =
(struct ip_set_net *)cb->args[IPSET_CB_NET];
ip_set_id_t index = (ip_set_id_t)cb->args[IPSET_CB_INDEX];
- struct ip_set *set = ip_set(inst, index);
+ struct ip_set *set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
release_refcount:
/* If there was an error or set is done, release set */
if (ret || !cb->args[IPSET_CB_ARG0]) {
- set = ip_set(inst, index);
+ set = ip_set_ref_netlink(inst, index);
if (set->variant->uref)
set->variant->uref(set, cb, false);
pr_debug("release set %s\n", set->name);
if (inst->ip_set_max >= IPSET_INVALID_ID)
inst->ip_set_max = IPSET_INVALID_ID - 1;
- list = kcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
+ list = kvcalloc(inst->ip_set_max, sizeof(struct ip_set *), GFP_KERNEL);
if (!list)
return -ENOMEM;
inst->is_deleted = false;
}
}
nfnl_unlock(NFNL_SUBSYS_IPSET);
- kfree(rcu_dereference_protected(inst->ip_set_list, 1));
+ kvfree(rcu_dereference_protected(inst->ip_set_list, 1));
}
static struct pernet_operations ip_set_net_ops = {
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR]) {
e.cidr[0] = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!e.cidr[0] || e.cidr[0] > HOST_MASK)
+ if (e.cidr[0] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
if (tb[IPSET_ATTR_CIDR2]) {
e.cidr[1] = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!e.cidr[1] || e.cidr[1] > HOST_MASK)
+ if (e.cidr[1] > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
{
struct set_elem *e = container_of(rcu, struct set_elem, rcu);
struct ip_set *set = e->set;
- struct list_set *map = set->data;
- ip_set_put_byindex(map->net, e->id);
ip_set_ext_destroy(set, e);
kfree(e);
}
static inline void
list_set_del(struct ip_set *set, struct set_elem *e)
{
+ struct list_set *map = set->data;
+
set->elements--;
list_del_rcu(&e->list);
+ ip_set_put_byindex(map->net, e->id);
call_rcu(&e->rcu, __list_set_del_rcu);
}
static inline void
-list_set_replace(struct set_elem *e, struct set_elem *old)
+list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
+ struct list_set *map = set->data;
+
list_replace_rcu(&old->list, &e->list);
+ ip_set_put_byindex(map->net, old->id);
call_rcu(&old->rcu, __list_set_del_rcu);
}
INIT_LIST_HEAD(&e->list);
list_set_init_extensions(set, ext, e);
if (n)
- list_set_replace(e, n);
+ list_set_replace(set, e, n);
else if (next)
list_add_tail_rcu(&e->list, &next->list);
else if (prev)
const struct list_set *map = set->data;
struct nlattr *atd, *nested;
u32 i = 0, first = cb->args[IPSET_CB_ARG0];
+ char name[IPSET_MAXNAMELEN];
struct set_elem *e;
int ret = 0;
nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
if (!nested)
goto nla_put_failure;
- if (nla_put_string(skb, IPSET_ATTR_NAME,
- ip_set_name_byindex(map->net, e->id)))
+ ip_set_name_byindex(map->net, e->id, name);
+ if (nla_put_string(skb, IPSET_ATTR_NAME, name))
goto nla_put_failure;
if (ip_set_put_extensions(skb, set, e, true))
goto nla_put_failure;
return drops;
}
-static noinline int early_drop(struct net *net, unsigned int _hash)
+static noinline int early_drop(struct net *net, unsigned int hash)
{
- unsigned int i;
+ unsigned int i, bucket;
for (i = 0; i < NF_CT_EVICTION_RANGE; i++) {
struct hlist_nulls_head *ct_hash;
- unsigned int hash, hsize, drops;
+ unsigned int hsize, drops;
rcu_read_lock();
nf_conntrack_get_ht(&ct_hash, &hsize);
- hash = reciprocal_scale(_hash++, hsize);
+ if (!i)
+ bucket = reciprocal_scale(hash, hsize);
+ else
+ bucket = (bucket + 1) % hsize;
- drops = early_drop_list(net, &ct_hash[hash]);
+ drops = early_drop_list(net, &ct_hash[bucket]);
rcu_read_unlock();
if (drops) {
},
};
-static inline struct nf_dccp_net *dccp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.dccp;
-}
-
static noinline bool
dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
const struct dccp_hdr *dh)
state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
switch (state) {
default:
- dn = dccp_pernet(net);
+ dn = nf_dccp_pernet(net);
if (dn->dccp_loose == 0) {
msg = "not picking up existing connection ";
goto out_invalid;
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
- timeouts = dccp_pernet(nf_ct_net(ct))->dccp_timeout;
+ timeouts = nf_dccp_pernet(nf_ct_net(ct))->dccp_timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_dccp_net *dn = dccp_pernet(net);
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
unsigned int *timeouts = data;
int i;
static int dccp_init_net(struct net *net)
{
- struct nf_dccp_net *dn = dccp_pernet(net);
+ struct nf_dccp_net *dn = nf_dccp_pernet(net);
struct nf_proto_net *pn = &dn->pn;
if (!pn->users) {
}
}
-static inline struct nf_generic_net *generic_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.generic;
-}
-
static bool generic_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
}
if (!timeout)
- timeout = &generic_pernet(nf_ct_net(ct))->timeout;
+ timeout = &nf_generic_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
static int generic_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_generic_net *gn = nf_generic_pernet(net);
unsigned int *timeout = data;
if (!timeout)
static int generic_init_net(struct net *net)
{
- struct nf_generic_net *gn = generic_pernet(net);
+ struct nf_generic_net *gn = nf_generic_pernet(net);
struct nf_proto_net *pn = &gn->pn;
gn->timeout = nf_ct_generic_timeout;
static const unsigned int nf_ct_icmp_timeout = 30*HZ;
-static inline struct nf_icmp_net *icmp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmp;
-}
-
static bool icmp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff,
struct net *net, struct nf_conntrack_tuple *tuple)
{
}
if (!timeout)
- timeout = &icmp_pernet(nf_ct_net(ct))->timeout;
+ timeout = &nf_icmp_pernet(nf_ct_net(ct))->timeout;
nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
struct net *net, void *data)
{
unsigned int *timeout = data;
- struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_icmp_net *in = nf_icmp_pernet(net);
if (tb[CTA_TIMEOUT_ICMP_TIMEOUT]) {
if (!timeout)
static int icmp_init_net(struct net *net)
{
- struct nf_icmp_net *in = icmp_pernet(net);
+ struct nf_icmp_net *in = nf_icmp_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmp_timeout;
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
-static inline struct nf_icmp_net *icmpv6_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.icmpv6;
-}
-
static bool icmpv6_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
struct net *net,
static unsigned int *icmpv6_get_timeouts(struct net *net)
{
- return &icmpv6_pernet(net)->timeout;
+ return &nf_icmpv6_pernet(net)->timeout;
}
/* Returns verdict for packet, or -1 for invalid. */
struct net *net, void *data)
{
unsigned int *timeout = data;
- struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_icmp_net *in = nf_icmpv6_pernet(net);
if (!timeout)
timeout = icmpv6_get_timeouts(net);
static int icmpv6_init_net(struct net *net)
{
- struct nf_icmp_net *in = icmpv6_pernet(net);
+ struct nf_icmp_net *in = nf_icmpv6_pernet(net);
struct nf_proto_net *pn = &in->pn;
in->timeout = nf_ct_icmpv6_timeout;
}
};
-static inline struct nf_sctp_net *sctp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.sctp;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void sctp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
timeouts = nf_ct_timeout_lookup(ct);
if (!timeouts)
- timeouts = sctp_pernet(nf_ct_net(ct))->timeouts;
+ timeouts = nf_sctp_pernet(nf_ct_net(ct))->timeouts;
nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct nf_sctp_net *sn = sctp_pernet(net);
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
int i;
/* set default SCTP timeouts. */
static int sctp_init_net(struct net *net)
{
- struct nf_sctp_net *sn = sctp_pernet(net);
+ struct nf_sctp_net *sn = nf_sctp_pernet(net);
struct nf_proto_net *pn = &sn->pn;
if (!pn->users) {
}
};
-static inline struct nf_tcp_net *tcp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.tcp;
-}
-
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
static void tcp_print_conntrack(struct seq_file *s, struct nf_conn *ct)
const struct tcphdr *tcph)
{
struct net *net = nf_ct_net(ct);
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct ip_ct_tcp_state *sender = &state->seen[dir];
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
{
enum tcp_conntrack new_state;
struct net *net = nf_ct_net(ct);
- const struct nf_tcp_net *tn = tcp_pernet(net);
+ const struct nf_tcp_net *tn = nf_tcp_pernet(net);
const struct ip_ct_tcp_state *sender = &ct->proto.tcp.seen[0];
const struct ip_ct_tcp_state *receiver = &ct->proto.tcp.seen[1];
const struct nf_hook_state *state)
{
struct net *net = nf_ct_net(ct);
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_conntrack_tuple *tuple;
enum tcp_conntrack new_state, old_state;
unsigned int index, *timeouts;
static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
struct net *net, void *data)
{
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
unsigned int *timeouts = data;
int i;
static int tcp_init_net(struct net *net)
{
- struct nf_tcp_net *tn = tcp_pernet(net);
+ struct nf_tcp_net *tn = nf_tcp_pernet(net);
struct nf_proto_net *pn = &tn->pn;
if (!pn->users) {
[UDP_CT_REPLIED] = 180*HZ,
};
-static inline struct nf_udp_net *udp_pernet(struct net *net)
-{
- return &net->ct.nf_ct_proto.udp;
-}
-
static unsigned int *udp_get_timeouts(struct net *net)
{
- return udp_pernet(net)->timeouts;
+ return nf_udp_pernet(net)->timeouts;
}
static void udp_error_log(const struct sk_buff *skb,
struct net *net, void *data)
{
unsigned int *timeouts = data;
- struct nf_udp_net *un = udp_pernet(net);
+ struct nf_udp_net *un = nf_udp_pernet(net);
if (!timeouts)
timeouts = un->timeouts;
static int udp_init_net(struct net *net)
{
- struct nf_udp_net *un = udp_pernet(net);
+ struct nf_udp_net *un = nf_udp_pernet(net);
struct nf_proto_net *pn = &un->pn;
if (!pn->users) {
static int
cttimeout_default_fill_info(struct net *net, struct sk_buff *skb, u32 portid,
u32 seq, u32 type, int event, u16 l3num,
- const struct nf_conntrack_l4proto *l4proto)
+ const struct nf_conntrack_l4proto *l4proto,
+ const unsigned int *timeouts)
{
struct nlmsghdr *nlh;
struct nfgenmsg *nfmsg;
if (!nest_parms)
goto nla_put_failure;
- ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, NULL);
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, timeouts);
if (ret < 0)
goto nla_put_failure;
struct netlink_ext_ack *extack)
{
const struct nf_conntrack_l4proto *l4proto;
+ unsigned int *timeouts = NULL;
struct sk_buff *skb2;
int ret, err;
__u16 l3num;
l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
l4proto = nf_ct_l4proto_find_get(l4num);
- /* This protocol is not supported, skip. */
- if (l4proto->l4proto != l4num) {
- err = -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ if (l4proto->l4proto != l4num)
goto err;
+
+ switch (l4proto->l4proto) {
+ case IPPROTO_ICMP:
+ timeouts = &nf_icmp_pernet(net)->timeout;
+ break;
+ case IPPROTO_TCP:
+ timeouts = nf_tcp_pernet(net)->timeouts;
+ break;
+ case IPPROTO_UDP:
+ timeouts = nf_udp_pernet(net)->timeouts;
+ break;
+ case IPPROTO_DCCP:
+#ifdef CONFIG_NF_CT_PROTO_DCCP
+ timeouts = nf_dccp_pernet(net)->dccp_timeout;
+#endif
+ break;
+ case IPPROTO_ICMPV6:
+ timeouts = &nf_icmpv6_pernet(net)->timeout;
+ break;
+ case IPPROTO_SCTP:
+#ifdef CONFIG_NF_CT_PROTO_SCTP
+ timeouts = nf_sctp_pernet(net)->timeouts;
+#endif
+ break;
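+	/* 255 is the pseudo protocol number used by the generic tracker */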
+ case 255:
+ timeouts = &nf_generic_pernet(net)->timeout;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
+ if (!timeouts)
+ goto err;
+
skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (skb2 == NULL) {
err = -ENOMEM;
nlh->nlmsg_seq,
NFNL_MSG_TYPE(nlh->nlmsg_type),
IPCTNL_MSG_TIMEOUT_DEFAULT_SET,
- l3num,
- l4proto);
+ l3num, l4proto, timeouts);
if (ret <= 0) {
kfree_skb(skb2);
err = -ENOMEM;
return false;
}
-static int nft_compat_chain_validate_dependency(const char *tablename,
- const struct nft_chain *chain)
+static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
+ const char *tablename)
{
+ enum nft_chain_types type = NFT_CHAIN_T_DEFAULT;
+ const struct nft_chain *chain = ctx->chain;
const struct nft_base_chain *basechain;
if (!tablename ||
return 0;
basechain = nft_base_chain(chain);
- if (strcmp(tablename, "nat") == 0 &&
- basechain->type->type != NFT_CHAIN_T_NAT)
- return -EINVAL;
+ if (strcmp(tablename, "nat") == 0) {
+ if (ctx->family != NFPROTO_BRIDGE)
+ type = NFT_CHAIN_T_NAT;
+ if (basechain->type->type != type)
+ return -EINVAL;
+ }
return 0;
}
if (target->hooks && !(hook_mask & target->hooks))
return -EINVAL;
- ret = nft_compat_chain_validate_dependency(target->table,
- ctx->chain);
+ ret = nft_compat_chain_validate_dependency(ctx, target->table);
if (ret < 0)
return ret;
}
if (match->hooks && !(hook_mask & match->hooks))
return -EINVAL;
- ret = nft_compat_chain_validate_dependency(match->table,
- ctx->chain);
+ ret = nft_compat_chain_validate_dependency(ctx, match->table);
if (ret < 0)
return ret;
}
u32 modulus;
atomic_t counter;
u32 offset;
- struct nft_set *map;
};
static u32 nft_ng_inc_gen(struct nft_ng_inc *priv)
regs->data[priv->dreg] = nft_ng_inc_gen(priv);
}
-static void nft_ng_inc_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_ng_inc *priv = nft_expr_priv(expr);
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = nft_ng_inc_gen(priv);
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
-
- if (!found)
- return;
-
- nft_data_copy(®s->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static const struct nla_policy nft_ng_policy[NFTA_NG_MAX + 1] = {
[NFTA_NG_DREG] = { .type = NLA_U32 },
[NFTA_NG_MODULUS] = { .type = NLA_U32 },
[NFTA_NG_TYPE] = { .type = NLA_U32 },
[NFTA_NG_OFFSET] = { .type = NLA_U32 },
- [NFTA_NG_SET_NAME] = { .type = NLA_STRING,
- .len = NFT_SET_MAXNAMELEN - 1 },
- [NFTA_NG_SET_ID] = { .type = NLA_U32 },
};
static int nft_ng_inc_init(const struct nft_ctx *ctx,
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_ng_inc_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_ng_inc *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_ng_inc_init(ctx, expr, tb);
-
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_NG_SET_NAME],
- tb[NFTA_NG_SET_ID], genmask);
-
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_ng_dump(struct sk_buff *skb, enum nft_registers dreg,
u32 modulus, enum nft_ng_types type, u32 offset)
{
priv->offset);
}
-static int nft_ng_inc_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_ng_inc *priv = nft_expr_priv(expr);
-
- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
- NFT_NG_INCREMENTAL, priv->offset) ||
- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
- goto nla_put_failure;
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
struct nft_ng_random {
enum nft_registers dreg:8;
u32 modulus;
u32 offset;
- struct nft_set *map;
};
static u32 nft_ng_random_gen(struct nft_ng_random *priv)
regs->data[priv->dreg] = nft_ng_random_gen(priv);
}
-static void nft_ng_random_map_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
-{
- struct nft_ng_random *priv = nft_expr_priv(expr);
- const struct nft_set *map = priv->map;
- const struct nft_set_ext *ext;
- u32 result;
- bool found;
-
- result = nft_ng_random_gen(priv);
- found = map->ops->lookup(nft_net(pkt), map, &result, &ext);
- if (!found)
- return;
-
- nft_data_copy(®s->data[priv->dreg],
- nft_set_ext_data(ext), map->dlen);
-}
-
static int nft_ng_random_init(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nlattr * const tb[])
NFT_DATA_VALUE, sizeof(u32));
}
-static int nft_ng_random_map_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
-{
- struct nft_ng_random *priv = nft_expr_priv(expr);
- u8 genmask = nft_genmask_next(ctx->net);
-
- nft_ng_random_init(ctx, expr, tb);
- priv->map = nft_set_lookup_global(ctx->net, ctx->table,
- tb[NFTA_NG_SET_NAME],
- tb[NFTA_NG_SET_ID], genmask);
-
- return PTR_ERR_OR_ZERO(priv->map);
-}
-
static int nft_ng_random_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_ng_random *priv = nft_expr_priv(expr);
priv->offset);
}
-static int nft_ng_random_map_dump(struct sk_buff *skb,
- const struct nft_expr *expr)
-{
- const struct nft_ng_random *priv = nft_expr_priv(expr);
-
- if (nft_ng_dump(skb, priv->dreg, priv->modulus,
- NFT_NG_RANDOM, priv->offset) ||
- nla_put_string(skb, NFTA_NG_SET_NAME, priv->map->name))
- goto nla_put_failure;
-
- return 0;
-
-nla_put_failure:
- return -1;
-}
-
static struct nft_expr_type nft_ng_type;
static const struct nft_expr_ops nft_ng_inc_ops = {
.type = &nft_ng_type,
.dump = nft_ng_inc_dump,
};
-static const struct nft_expr_ops nft_ng_inc_map_ops = {
- .type = &nft_ng_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_inc)),
- .eval = nft_ng_inc_map_eval,
- .init = nft_ng_inc_map_init,
- .dump = nft_ng_inc_map_dump,
-};
-
static const struct nft_expr_ops nft_ng_random_ops = {
.type = &nft_ng_type,
.size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
.dump = nft_ng_random_dump,
};
-static const struct nft_expr_ops nft_ng_random_map_ops = {
- .type = &nft_ng_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_ng_random)),
- .eval = nft_ng_random_map_eval,
- .init = nft_ng_random_map_init,
- .dump = nft_ng_random_map_dump,
-};
-
static const struct nft_expr_ops *
nft_ng_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
switch (type) {
case NFT_NG_INCREMENTAL:
- if (tb[NFTA_NG_SET_NAME])
- return &nft_ng_inc_map_ops;
return &nft_ng_inc_ops;
case NFT_NG_RANDOM:
- if (tb[NFTA_NG_SET_NAME])
- return &nft_ng_random_map_ops;
return &nft_ng_random_ops;
}
int err;
u8 ttl;
- if (nla_get_u8(tb[NFTA_OSF_TTL])) {
+ if (tb[NFTA_OSF_TTL]) {
ttl = nla_get_u8(tb[NFTA_OSF_TTL]);
if (ttl > 2)
return -EINVAL;
schedule_work(&timer->work);
}
+static int idletimer_check_sysfs_name(const char *name, unsigned int size)
+{
+ int ret;
+
+ ret = xt_check_proc_name(name, size);
+ if (ret < 0)
+ return ret;
+
+ if (!strcmp(name, "power") ||
+ !strcmp(name, "subsystem") ||
+ !strcmp(name, "uevent"))
+ return -EINVAL;
+
+ return 0;
+}
+
static int idletimer_tg_create(struct idletimer_tg_info *info)
{
int ret;
goto out;
}
+ ret = idletimer_check_sysfs_name(info->label, sizeof(info->label));
+ if (ret < 0)
+ goto out_free_timer;
+
sysfs_attr_init(&info->timer->attr.attr);
info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
if (!info->timer->attr.attr.name) {
&info->labels.mask);
if (err)
return err;
- } else if (labels_nonzero(&info->labels.mask)) {
+ } else if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
+ labels_nonzero(&info->labels.mask)) {
err = ovs_ct_set_labels(ct, key, &info->labels.value,
&info->labels.mask);
if (err)
* not hard-ACK'd packet follows this.
*/
rxrpc_seq_t tx_top; /* Highest Tx slot allocated. */
+ u16 tx_backoff; /* Delay to insert due to Tx failure */
/* TCP-style slow-start congestion control [RFC5681]. Since the SMSS
* is fixed, we keep these numbers in terms of segments (ie. DATA
else
ack_at = expiry;
+ ack_at += READ_ONCE(call->tx_backoff);
ack_at += now;
if (time_before(ack_at, call->ack_at)) {
WRITE_ONCE(call->ack_at, ack_at);
container_of(work, struct rxrpc_call, processor);
rxrpc_serial_t *send_ack;
unsigned long now, next, t;
+ unsigned int iterations = 0;
rxrpc_see_call(call);
call->debug_id, rxrpc_call_states[call->state], call->events);
recheck_state:
+ /* Limit the number of times we do this before returning to the manager */
+ iterations++;
+ if (iterations > 5)
+ goto requeue;
+
if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_send_abort_packet(call);
goto recheck_state;
rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
- if (call->events && call->state < RXRPC_CALL_COMPLETE) {
- __rxrpc_queue_call(call);
- goto out;
- }
+ if (call->events && call->state < RXRPC_CALL_COMPLETE)
+ goto requeue;
out_put:
rxrpc_put_call(call, rxrpc_call_put);
out:
_leave("");
+ return;
+
+requeue:
+ __rxrpc_queue_call(call);
+ goto out;
}
static const char rxrpc_keepalive_string[] = "";
+/*
+ * Increase Tx backoff on transmission failure and clear it on success.
+ */
+static void rxrpc_tx_backoff(struct rxrpc_call *call, int ret)
+{
+ if (ret < 0) {
+ u16 tx_backoff = READ_ONCE(call->tx_backoff);
+
+ if (tx_backoff < HZ)
+ WRITE_ONCE(call->tx_backoff, tx_backoff + 1);
+ } else {
+ WRITE_ONCE(call->tx_backoff, 0);
+ }
+}
+
/*
* Arrange for a keepalive ping a certain time after we last transmitted. This
* lets the far side know we're still interested in this call and helps keep
else
trace_rxrpc_tx_packet(call->debug_id, &pkt->whdr,
rxrpc_tx_point_call_ack);
+ rxrpc_tx_backoff(call, ret);
if (call->state < RXRPC_CALL_COMPLETE) {
if (ret < 0) {
rxrpc_propose_ACK(call, pkt->ack.reason,
ntohs(pkt->ack.maxSkew),
ntohl(pkt->ack.serial),
- true, true,
+ false, true,
rxrpc_propose_ack_retry_tx);
} else {
spin_lock_bh(&call->lock);
else
trace_rxrpc_tx_packet(call->debug_id, &pkt.whdr,
rxrpc_tx_point_call_abort);
-
+ rxrpc_tx_backoff(call, ret);
rxrpc_put_connection(conn);
return ret;
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_nofrag);
+ rxrpc_tx_backoff(call, ret);
if (ret == -EMSGSIZE)
goto send_fragmentable;
rxrpc_reduce_call_timer(call, expect_rx_by, nowj,
rxrpc_timer_set_for_normal);
}
- }
- rxrpc_set_keepalive(call);
+ rxrpc_set_keepalive(call);
+ } else {
+ /* Cancel the call if the initial transmission fails,
+ * particularly if that's due to network routing issues that
+ * aren't going away anytime soon. The layer above can arrange
+ * the retransmission.
+ */
+ if (!test_and_set_bit(RXRPC_CALL_BEGAN_RX_TIMER, &call->flags))
+ rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
+ RX_USER_ABORT, ret);
+ }
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
else
trace_rxrpc_tx_packet(call->debug_id, &whdr,
rxrpc_tx_point_call_data_frag);
+ rxrpc_tx_backoff(call, ret);
up_write(&conn->params.local->defrag_sem);
goto done;
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
- sctp_sched_set_sched(asoc, SCTP_SS_FCFS);
+ sctp_sched_set_sched(asoc, SCTP_SS_DEFAULT);
}
/* Free the outqueue structure and any related pending chunks.
removefunc = false;
}
if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0 &&
- snd_hda_gen_add_micmute_led(codec,
- update_tpacpi_micmute) > 0)
+ !snd_hda_gen_add_micmute_led(codec,
+ update_tpacpi_micmute))
removefunc = false;
}
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
-#define smp_store_release(p, v) \
-do { \
- union { typeof(*p) __val; char __c[1]; } __u = \
- { .__val = (__force typeof(*p)) (v) }; \
- \
- switch (sizeof(*p)) { \
- case 1: \
- asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u8 *)__u.__c) \
- : "memory"); \
- break; \
- case 2: \
- asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u16 *)__u.__c) \
- : "memory"); \
- break; \
- case 4: \
- asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u32 *)__u.__c) \
- : "memory"); \
- break; \
- case 8: \
- asm volatile ("stlr %1, %0" \
- : "=Q" (*p) \
- : "r" (*(__u64 *)__u.__c) \
- : "memory"); \
- break; \
- default: \
- /* Only to shut up gcc ... */ \
- mb(); \
- break; \
- } \
+#define smp_store_release(p, v) \
+do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (v) }; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u8_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u16_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("stlr %w1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u32_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("stlr %1, %0" \
+ : "=Q" (*p) \
+ : "r" (*(__u64_alias_t *)__u.__c) \
+ : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
} while (0)
-#define smp_load_acquire(p) \
-({ \
- union { typeof(*p) __val; char __c[1]; } __u; \
- \
- switch (sizeof(*p)) { \
- case 1: \
- asm volatile ("ldarb %w0, %1" \
- : "=r" (*(__u8 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 2: \
- asm volatile ("ldarh %w0, %1" \
- : "=r" (*(__u16 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 4: \
- asm volatile ("ldar %w0, %1" \
- : "=r" (*(__u32 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- case 8: \
- asm volatile ("ldar %0, %1" \
- : "=r" (*(__u64 *)__u.__c) \
- : "Q" (*p) : "memory"); \
- break; \
- default: \
- /* Only to shut up gcc ... */ \
- mb(); \
- break; \
- } \
- __u.__val; \
+#define smp_load_acquire(p) \
+({ \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__c = { 0 } }; \
+ \
+ switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("ldarb %w0, %1" \
+ : "=r" (*(__u8_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("ldarh %w0, %1" \
+ : "=r" (*(__u16_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 4: \
+ asm volatile ("ldar %w0, %1" \
+ : "=r" (*(__u32_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ case 8: \
+ asm volatile ("ldar %0, %1" \
+ : "=r" (*(__u64_alias_t *)__u.__c) \
+ : "Q" (*p) : "memory"); \
+ break; \
+ default: \
+ /* Only to shut up gcc ... */ \
+ mb(); \
+ break; \
+ } \
+ __u.__val; \
})
#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
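A small sketch of the classic pairing these macros implement - a
single-producer/single-consumer publish flag (illustrative only):

	struct message {
		int payload;
		int ready;
	};

	static void producer(struct message *m)
	{
		m->payload = 42;			/* plain store */
		smp_store_release(&m->ready, 1);	/* publishes payload */
	}

	static int consumer(struct message *m)
	{
		if (smp_load_acquire(&m->ready))	/* pairs with release */
			return m->payload;		/* guaranteed to see 42 */
		return -1;
	}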
S - read sample value (PERF_SAMPLE_READ)
D - pin the event to the PMU
-W - group is weak and will fallback to non-group if not schedulable,
-        only supported in 'perf stat' for now.
+W - group is weak and will fall back to non-group if not schedulable
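For instance (illustrative), a weak group can now be used with perf record
as well; if the PMU cannot co-schedule all of the events, they fall back to
being scheduled independently instead of the open failing:

  $ perf record -e '{cycles,instructions,cache-misses}:W' -- sleep 1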
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
linux_uapi_dir := $(srctree)/tools/include/uapi/linux
asm_generic_uapi_dir := $(srctree)/tools/include/uapi/asm-generic
-arch_asm_uapi_dir := $(srctree)/tools/arch/$(ARCH)/include/uapi/asm/
+arch_asm_uapi_dir := $(srctree)/tools/arch/$(SRCARCH)/include/uapi/asm/
beauty_outdir := $(OUTPUT)trace/beauty/generated
beauty_ioctl_outdir := $(beauty_outdir)/ioctl
ui__warning("%s\n", msg);
goto try_again;
}
-
+ if ((errno == EINVAL || errno == EBADF) &&
+ pos->leader != pos &&
+ pos->weak_group) {
+ pos = perf_evlist__reset_weak_group(evlist, pos);
+ goto try_again;
+ }
rc = -errno;
perf_evsel__open_strerror(pos, &opts->target,
errno, msg, sizeof(msg));
return STAT_RECORD || counter->attr.read_format & PERF_FORMAT_ID;
}
-static struct perf_evsel *perf_evsel__reset_weak_group(struct perf_evsel *evsel)
-{
- struct perf_evsel *c2, *leader;
- bool is_open = true;
-
- leader = evsel->leader;
- pr_debug("Weak group for %s/%d failed\n",
- leader->name, leader->nr_members);
-
- /*
- * for_each_group_member doesn't work here because it doesn't
- * include the first entry.
- */
- evlist__for_each_entry(evsel_list, c2) {
- if (c2 == evsel)
- is_open = false;
- if (c2->leader == leader) {
- if (is_open)
- perf_evsel__close(c2);
- c2->leader = c2;
- c2->nr_members = 0;
- }
- }
- return leader;
-}
-
static bool is_target_alive(struct target *_target,
struct thread_map *threads)
{
if ((errno == EINVAL || errno == EBADF) &&
counter->leader != counter &&
counter->weak_group) {
- counter = perf_evsel__reset_weak_group(counter);
+ counter = perf_evlist__reset_weak_group(evsel_list, counter);
goto try_again;
}
}
}
+ if (opts->branch_stack && callchain_param.enabled)
+ symbol_conf.show_branchflag_count = true;
+
sort__mode = SORT_MODE__TOP;
/* display thread wants entries to be collapsed in a different tree */
perf_hpp_list.need_collapse = 1;
} stats;
unsigned int max_stack;
unsigned int min_stack;
+ bool raw_augmented_syscalls;
bool not_ev_qualifier;
bool live;
bool full_time;
return printed;
}
-static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size)
+static void *syscall__augmented_args(struct syscall *sc, struct perf_sample *sample, int *augmented_args_size, bool raw_augmented)
{
void *augmented_args = NULL;
+ /*
+	 * With BPF raw_augmented we hook into raw_syscalls:sys_enter, where
+	 * we get all 6 syscall args plus the tracepoint common fields (one
+	 * long) and the syscall_nr (another long) - 8 longs in total. So in
+	 * that case we don't look past sc->args_size, but past the full,
+	 * fixed-size raw_syscalls:sys_enter payload instead.
+	 *
+	 * We'll revisit this later to pass sc->args_size to the BPF augmenter
+	 * (now tools/perf/examples/bpf/augmented_raw_syscalls.c), so that it
+	 * copies only what we need for each syscall, like what happens when
+	 * we use syscalls:sys_enter_NAME, reducing the kernel/userspace
+	 * traffic to just what is needed for each syscall.
+ */
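+ /*
+ * On x86-64 (LP64), for example, that fixed payload works out to
+ * 8 * sizeof(long) = 64 bytes: one long of common tracepoint fields,
+ * one for the syscall_nr and six for the arguments.
+ */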
+ int args_size = raw_augmented ? (8 * (int)sizeof(long)) : sc->args_size;
- *augmented_args_size = sample->raw_size - sc->args_size;
+ *augmented_args_size = sample->raw_size - args_size;
if (*augmented_args_size > 0)
- augmented_args = sample->raw_data + sc->args_size;
+ augmented_args = sample->raw_data + args_size;
return augmented_args;
}
* here and avoid using augmented syscalls when the evsel is the raw_syscalls one.
*/
if (evsel != trace->syscalls.events.sys_enter)
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
ttrace->entry_time = sample->time;
msg = ttrace->entry_str;
printed += scnprintf(msg + printed, trace__entry_str_size - printed, "%s(", sc->name);
goto out_put;
args = perf_evsel__sc_tp_ptr(evsel, args, sample);
- augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size);
+ augmented_args = syscall__augmented_args(sc, sample, &augmented_args_size, trace->raw_augmented_syscalls);
syscall__scnprintf_args(sc, msg, sizeof(msg), args, augmented_args, augmented_args_size, trace, thread);
fprintf(trace->output, "%s", msg);
err = 0;
evsel->handler = trace__sys_enter;
evlist__for_each_entry(trace.evlist, evsel) {
+ bool raw_syscalls_sys_exit = strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_exit") == 0;
+
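+ /*
+ * raw_syscalls:sys_exit needs the same augmented handler setup as
+ * the syscalls:sys_exit_NAME events, hence the goto into the block
+ * below.
+ */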
+ if (raw_syscalls_sys_exit) {
+ trace.raw_augmented_syscalls = true;
+ goto init_augmented_syscall_tp;
+ }
+
if (strstarts(perf_evsel__name(evsel), "syscalls:sys_exit_")) {
+init_augmented_syscall_tp:
perf_evsel__init_augmented_syscall_tp(evsel);
perf_evsel__init_augmented_syscall_tp_ret(evsel);
evsel->handler = trace__sys_exit;
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
+ *
+ * Test it with:
+ *
+ * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
+ *
+ * This exactly matches what is marshalled into the raw_syscalls:sys_enter
+ * payload expected by the 'perf trace' beautifiers.
+ *
+ * For now it just uses the existing tracepoint augmentation code in 'perf
+ * trace'; in the next csets we'll hook these up with the sys_enter/sys_exit
+ * code that will combine entry/exit in a strace-like way.
+ */
+
+#include <stdio.h>
+#include <linux/socket.h>
+
+/* bpf-output associated map */
+struct bpf_map SEC("maps") __augmented_syscalls__ = {
+ .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
+ .key_size = sizeof(int),
+ .value_size = sizeof(u32),
+ .max_entries = __NR_CPUS__,
+};
+
+struct syscall_enter_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ unsigned long args[6];
+};
+
+struct syscall_exit_args {
+ unsigned long long common_tp_fields;
+ long syscall_nr;
+ long ret;
+};
+
+struct augmented_filename {
+ unsigned int size;
+ int reserved;
+ char value[256];
+};
+
+#define SYS_OPEN 2
+#define SYS_OPENAT 257
+
+SEC("raw_syscalls:sys_enter")
+int sys_enter(struct syscall_enter_args *args)
+{
+ struct {
+ struct syscall_enter_args args;
+ struct augmented_filename filename;
+ } augmented_args;
+ unsigned int len = sizeof(augmented_args);
+ const void *filename_arg = NULL;
+
+ probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
+ /*
+ * Yonghong and Edward Cree say:
+ *
+ * https://www.spinics.net/lists/netdev/msg531645.html
+ *
+ * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
+ * >> 10: (bf) r1 = r6
+ * >> 11: (07) r1 += 16
+ * >> 12: (05) goto pc+2
+ * >> 15: (79) r3 = *(u64 *)(r1 +0)
+ * >> dereference of modified ctx ptr R1 off=16 disallowed
+ * > Aha, we at least got a different error message this time.
+ * > And indeed llvm has done that optimisation, rather than the more obvious
+ * > 11: r3 = *(u64 *)(r1 +16)
+ * > because it wants to have lots of reads share a single insn. You may be able
+ * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
+ * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
+ * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
+ *
+ * The optimization mostly looks like the following:
+ *
+ * br1:
+ * ...
+ * r1 += 16
+ * goto merge
+ * br2:
+ * ...
+ * r1 += 20
+ * goto merge
+ * merge:
+ * *(u64 *)(r1 + 0)
+ *
+ * The compiler tries to merge common loads. There is no easy way to
+ * stop this compiler optimization without turning off a lot of other
+ * optimizations. The easiest way is to add barriers:
+ *
+ * __asm__ __volatile__("": : :"memory")
+ *
+ * after the ctx memory access to prevent their downstream merging.
+ */
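+ /*
+ * The asm barrier in the SYS_OPEN case below is that workaround,
+ * keeping LLVM from merging the two args->args[] context loads.
+ */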
+ switch (augmented_args.args.syscall_nr) {
+ case SYS_OPEN: filename_arg = (const void *)args->args[0];
+ __asm__ __volatile__("": : :"memory");
+ break;
+ case SYS_OPENAT: filename_arg = (const void *)args->args[1];
+ break;
+ }
+
+ if (filename_arg != NULL) {
+ augmented_args.filename.reserved = 0;
+ augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
+ sizeof(augmented_args.filename.value),
+ filename_arg);
+ if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
+ len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
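+ /*
+ * The mask (the buffer size is a power of two) also gives
+ * the BPF verifier a provable upper bound on len.
+ */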
+ len &= sizeof(augmented_args.filename.value) - 1;
+ }
+ } else {
+ len = sizeof(augmented_args.args);
+ }
+
+ perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
+ return 0;
+}
+
+SEC("raw_syscalls:sys_exit")
+int sys_exit(struct syscall_exit_args *args)
+{
+ return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
+}
+
+license(GPL);
}
static int
-debug_cache_init(void)
+create_jit_cache_dir(void)
{
char str[32];
char *base, *p;
strftime(str, sizeof(str), JIT_LANG"-jit-%Y%m%d", &tm);
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/", base);
-
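+ /* snprintf() returns the full untruncated length, so ret >= PATH_MAX means the path was cut short */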
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/", base);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because %s/.debug/"
+ " is too long, please check the cwd, JITDUMPDIR, and"
+ " HOME variables", base);
+ return -1;
+ }
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
}
}
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit", base);
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit", base);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because"
+ " %s/.debug/jit is too long, please check the cwd,"
+ " JITDUMPDIR, and HOME variables", base);
+ return -1;
+ }
ret = mkdir(jit_path, 0755);
if (ret == -1) {
if (errno != EEXIST) {
- warn("cannot create jit cache dir %s", jit_path);
+ warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
}
- snprintf(jit_path, PATH_MAX - 1, "%s/.debug/jit/%s.XXXXXXXX", base, str);
-
+ ret = snprintf(jit_path, PATH_MAX, "%s/.debug/jit/%s.XXXXXXXX", base, str);
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jit cache dir because"
+ " %s/.debug/jit/%s.XXXXXXXX is too long, please check"
+ " the cwd, JITDUMPDIR, and HOME variables",
+ base, str);
+ return -1;
+ }
p = mkdtemp(jit_path);
if (p != jit_path) {
- warn("cannot create jit cache dir %s", jit_path);
+ warn("jvmti: cannot create jit cache dir %s", jit_path);
return -1;
}
{
char dump_path[PATH_MAX];
struct jitheader header;
- int fd;
+ int fd, ret;
FILE *fp;
init_arch_timestamp();
memset(&header, 0, sizeof(header));
- debug_cache_init();
+ /*
+ * jitdump file dir
+ */
+ if (create_jit_cache_dir() < 0)
+ return NULL;
/*
* jitdump file name
*/
- scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+ ret = snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+ if (ret >= PATH_MAX) {
+ warnx("jvmti: cannot generate jitdump file full path because"
+ " %s/jit-%i.dump is too long, please check the cwd,"
+ " JITDUMPDIR, and HOME variables", jit_path, getpid());
+ return NULL;
+ }
fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
if (fd == -1)
return "[kernel]"
return name
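+# Return the offset of the n'th occurrence of substring 'sub' in 's',
+# or -1 if there are fewer than n occurrences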
+def findnth(s, sub, n, offs=0):
+ pos = s.find(sub)
+ if pos < 0:
+ return pos
+ if n <= 1:
+ return offs + pos
+ return findnth(s[pos + 1:], sub, n - 1, offs + pos + 1)
+
# Percent to one decimal place
def PercentToOneDP(n, d):
else:
self.find_bar.NotFound()
+# Dialog data item converted and validated using a SQL table
+
+class SQLTableDialogDataItem():
+
+ def __init__(self, glb, label, placeholder_text, table_name, match_column, column_name1, column_name2, parent):
+ self.glb = glb
+ self.label = label
+ self.placeholder_text = placeholder_text
+ self.table_name = table_name
+ self.match_column = match_column
+ self.column_name1 = column_name1
+ self.column_name2 = column_name2
+ self.parent = parent
+
+ self.value = ""
+
+ self.widget = QLineEdit()
+ self.widget.editingFinished.connect(self.Validate)
+ self.widget.textChanged.connect(self.Invalidate)
+ self.red = False
+ self.error = ""
+ self.validated = True
+
+ self.last_id = 0
+ self.first_time = 0
+ self.last_time = 2 ** 64
+ if self.table_name == "<timeranges>":
+ query = QSqlQuery(self.glb.db)
+ QueryExec(query, "SELECT id, time FROM samples ORDER BY id DESC LIMIT 1")
+ if query.next():
+ self.last_id = int(query.value(0))
+ self.last_time = int(query.value(1))
+ QueryExec(query, "SELECT time FROM samples WHERE time != 0 ORDER BY id LIMIT 1")
+ if query.next():
+ self.first_time = int(query.value(0))
+ if placeholder_text:
+ placeholder_text += ", between " + str(self.first_time) + " and " + str(self.last_time)
+
+ if placeholder_text:
+ self.widget.setPlaceholderText(placeholder_text)
+
+ def ValueToIds(self, value):
+ ids = []
+ query = QSqlQuery(self.glb.db)
+ stmt = "SELECT id FROM " + self.table_name + " WHERE " + self.match_column + " = '" + value + "'"
+ ret = query.exec_(stmt)
+ if ret:
+ while query.next():
+ ids.append(str(query.value(0)))
+ return ids
+
+ def IdBetween(self, query, lower_id, higher_id, order):
+ QueryExec(query, "SELECT id FROM samples WHERE id > " + str(lower_id) + " AND id < " + str(higher_id) + " ORDER BY id " + order + " LIMIT 1")
+ if query.next():
+ return True, int(query.value(0))
+ else:
+ return False, 0
+
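+ # Binary search the samples table by time to map a time bound onto a
+ # sample id, taking the floor or the ceiling as requested. IdBetween()
+ # steps over any gaps in the id sequence.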
+ def BinarySearchTime(self, lower_id, higher_id, target_time, get_floor):
+ query = QSqlQuery(self.glb.db)
+ while True:
+ next_id = int((lower_id + higher_id) / 2)
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ if not query.next():
+ ok, dbid = self.IdBetween(query, lower_id, next_id, "DESC")
+ if not ok:
+ ok, dbid = self.IdBetween(query, next_id, higher_id, "")
+ if not ok:
+ return str(higher_id)
+ next_id = dbid
+ QueryExec(query, "SELECT time FROM samples WHERE id = " + str(next_id))
+ next_time = int(query.value(0))
+ if get_floor:
+ if target_time > next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(higher_id)
+ else:
+ if target_time >= next_time:
+ lower_id = next_id
+ else:
+ higher_id = next_id
+ if higher_id <= lower_id + 1:
+ return str(lower_id)
+
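+ # Convert a relative time with an "ms", "us" or "ns" suffix into an
+ # absolute sample time in nanoseconds: non-negative values are offsets
+ # from the start of the trace, negative values from its end. Unsuffixed
+ # values pass through unchanged.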
+ def ConvertRelativeTime(self, val):
+ print "val ", val
+ mult = 1
+ suffix = val[-2:]
+ if suffix == "ms":
+ mult = 1000000
+ elif suffix == "us":
+ mult = 1000
+ elif suffix == "ns":
+ mult = 1
+ else:
+ return val
+ val = val[:-2].strip()
+ if not self.IsNumber(val):
+ return val
+ val = int(val) * mult
+ if val >= 0:
+ val += self.first_time
+ else:
+ val += self.last_time
+ return str(val)
+
+ def ConvertTimeRange(self, vrange):
+ print "vrange ", vrange
+ if vrange[0] == "":
+ vrange[0] = str(self.first_time)
+ if vrange[1] == "":
+ vrange[1] = str(self.last_time)
+ vrange[0] = self.ConvertRelativeTime(vrange[0])
+ vrange[1] = self.ConvertRelativeTime(vrange[1])
+ print "vrange2 ", vrange
+ if not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return False
+ print "ok1"
+ beg_range = max(int(vrange[0]), self.first_time)
+ end_range = min(int(vrange[1]), self.last_time)
+ if beg_range > self.last_time or end_range < self.first_time:
+ return False
+ print "ok2"
+ vrange[0] = self.BinarySearchTime(0, self.last_id, beg_range, True)
+ vrange[1] = self.BinarySearchTime(1, self.last_id + 1, end_range, False)
+ print "vrange3 ", vrange
+ return True
+
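+ # Split "<start>-<end>" on the separating dash. The dash is located by
+ # counting: negative, end-relative values contribute their own leading
+ # dashes, e.g. "-10ms-" (the last 10ms) contains two.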
+ def AddTimeRange(self, value, ranges):
+ print "value ", value
+ n = value.count("-")
+ if n == 1:
+ pass
+ elif n == 2:
+ if value.split("-")[1].strip() == "":
+ n = 1
+ elif n == 3:
+ n = 2
+ else:
+ return False
+ pos = findnth(value, "-", n)
+ vrange = [value[:pos].strip(), value[pos+1:].strip()]
+ if self.ConvertTimeRange(vrange):
+ ranges.append(vrange)
+ return True
+ return False
+
+ def InvalidValue(self, value):
+ self.value = ""
+ palette = QPalette()
+ palette.setColor(QPalette.Text,Qt.red)
+ self.widget.setPalette(palette)
+ self.red = True
+ self.error = self.label + " invalid value '" + value + "'"
+ self.parent.ShowMessage(self.error)
+
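+ # True only if int() round-trips the string exactly, which also rejects
+ # forms like "1x" or padded numbers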
+ def IsNumber(self, value):
+ try:
+ x = int(value)
+ except ValueError:
+ x = 0
+ return str(x) == value
+
+ def Invalidate(self):
+ self.validated = False
+
+ def Validate(self):
+ input_string = self.widget.text()
+ self.validated = True
+ if self.red:
+ palette = QPalette()
+ self.widget.setPalette(palette)
+ self.red = False
+ if not len(input_string.strip()):
+ self.error = ""
+ self.value = ""
+ return
+ if self.table_name == "<timeranges>":
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if not self.AddTimeRange(value, ranges):
+ return self.InvalidValue(value)
+ ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+ self.value = " OR ".join(ranges)
+ elif self.table_name == "<ranges>":
+ singles = []
+ ranges = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ if "-" in value:
+ vrange = value.split("-")
+ if len(vrange) != 2 or not self.IsNumber(vrange[0]) or not self.IsNumber(vrange[1]):
+ return self.InvalidValue(value)
+ ranges.append(vrange)
+ else:
+ if not self.IsNumber(value):
+ return self.InvalidValue(value)
+ singles.append(value)
+ ranges = [("(" + self.column_name1 + " >= " + r[0] + " AND " + self.column_name1 + " <= " + r[1] + ")") for r in ranges]
+ if len(singles):
+ ranges.append(self.column_name1 + " IN (" + ",".join(singles) + ")")
+ self.value = " OR ".join(ranges)
+ elif self.table_name:
+ all_ids = []
+ for value in [x.strip() for x in input_string.split(",")]:
+ ids = self.ValueToIds(value)
+ if len(ids):
+ all_ids.extend(ids)
+ else:
+ return self.InvalidValue(value)
+ self.value = self.column_name1 + " IN (" + ",".join(all_ids) + ")"
+ if self.column_name2:
+ self.value = "( " + self.value + " OR " + self.column_name2 + " IN (" + ",".join(all_ids) + ") )"
+ else:
+ self.value = input_string.strip()
+ self.error = ""
+ self.parent.ClearMessage()
+
+ def IsValid(self):
+ if not self.validated:
+ self.Validate()
+ if len(self.error):
+ self.parent.ShowMessage(self.error)
+ return False
+ return True
+
+# Selected branch report creation dialog
+
+class SelectedBranchDialog(QDialog):
+
+ def __init__(self, glb, parent=None):
+ super(SelectedBranchDialog, self).__init__(parent)
+
+ self.glb = glb
+
+ self.name = ""
+ self.where_clause = ""
+
+ self.setWindowTitle("Selected Branches")
+ self.setMinimumWidth(600)
+
+ items = (
+ ("Report name:", "Enter a name to appear in the window title bar", "", "", "", ""),
+ ("Time ranges:", "Enter time ranges", "<timeranges>", "", "samples.id", ""),
+ ("CPUs:", "Enter CPUs or ranges e.g. 0,5-6", "<ranges>", "", "cpu", ""),
+ ("Commands:", "Only branches with these commands will be included", "comms", "comm", "comm_id", ""),
+ ("PIDs:", "Only branches with these process IDs will be included", "threads", "pid", "thread_id", ""),
+ ("TIDs:", "Only branches with these thread IDs will be included", "threads", "tid", "thread_id", ""),
+ ("DSOs:", "Only branches with these DSOs will be included", "dsos", "short_name", "samples.dso_id", "to_dso_id"),
+ ("Symbols:", "Only branches with these symbols will be included", "symbols", "name", "symbol_id", "to_symbol_id"),
+ ("Raw SQL clause: ", "Enter a raw SQL WHERE clause", "", "", "", ""),
+ )
+ self.data_items = [SQLTableDialogDataItem(glb, *x, parent=self) for x in items]
+
+ self.grid = QGridLayout()
+
+ for row in xrange(len(self.data_items)):
+ self.grid.addWidget(QLabel(self.data_items[row].label), row, 0)
+ self.grid.addWidget(self.data_items[row].widget, row, 1)
+
+ self.status = QLabel()
+
+ self.ok_button = QPushButton("Ok", self)
+ self.ok_button.setDefault(True)
+ self.ok_button.released.connect(self.Ok)
+ self.ok_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.cancel_button = QPushButton("Cancel", self)
+ self.cancel_button.released.connect(self.reject)
+ self.cancel_button.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
+
+ self.hbox = QHBoxLayout()
+ #self.hbox.addStretch()
+ self.hbox.addWidget(self.status)
+ self.hbox.addWidget(self.ok_button)
+ self.hbox.addWidget(self.cancel_button)
+
+ self.vbox = QVBoxLayout()
+ self.vbox.addLayout(self.grid)
+ self.vbox.addLayout(self.hbox)
+
+ self.setLayout(self.vbox)
+
+ def Ok(self):
+ self.name = self.data_items[0].value
+ if not self.name:
+ self.ShowMessage("Report name is required")
+ return
+ for d in self.data_items:
+ if not d.IsValid():
+ return
+ for d in self.data_items[1:]:
+ if len(d.value):
+ if len(self.where_clause):
+ self.where_clause += " AND "
+ self.where_clause += d.value
+ if len(self.where_clause):
+ self.where_clause = " AND ( " + self.where_clause + " ) "
+ else:
+ self.ShowMessage("No selection")
+ return
+ self.accept()
+
+ def ShowMessage(self, msg):
+ self.status.setText("<font color=#FF0000>" + msg)
+
+ def ClearMessage(self):
+ self.status.setText("")
+
# Event list
def GetEventList(db):
def FindDone(self, row):
self.find_bar.Idle()
if row >= 0:
- self.view.setCurrentIndex(self.model.index(row, 0, QModelIndex()))
+ self.view.setCurrentIndex(self.model.mapFromSource(self.data_model.index(row, 0, QModelIndex())))
else:
self.find_bar.NotFound()
def setActiveSubWindow(self, nr):
self.mdi_area.setActiveSubWindow(self.mdi_area.subWindowList()[nr - 1])
+# Help text
+
+glb_help_text = """
+<h1>Contents</h1>
+<style>
+p.c1 {
+ text-indent: 40px;
+}
+p.c2 {
+ text-indent: 80px;
+}
+</style>
+<p class=c1><a href=#reports>1. Reports</a></p>
+<p class=c2><a href=#callgraph>1.1 Context-Sensitive Call Graph</a></p>
+<p class=c2><a href=#allbranches>1.2 All branches</a></p>
+<p class=c2><a href=#selectedbranches>1.3 Selected branches</a></p>
+<p class=c1><a href=#tables>2. Tables</a></p>
+<h1 id=reports>1. Reports</h1>
+<h2 id=callgraph>1.1 Context-Sensitive Call Graph</h2>
+The result is a GUI window with a tree representing a context-sensitive
+call-graph. Expanding a couple of levels of the tree and adjusting column
+widths to suit will display something like:
+<pre>
+ Call Graph: pt_example
+Call Path Object Count Time(ns) Time(%) Branch Count Branch Count(%)
+v- ls
+ v- 2638:2638
+ v- _start ld-2.19.so 1 10074071 100.0 211135 100.0
+ |- unknown unknown 1 13198 0.1 1 0.0
+ >- _dl_start ld-2.19.so 1 1400980 13.9 19637 9.3
+ >- _d_linit_internal ld-2.19.so 1 448152 4.4 11094 5.3
+ v-__libc_start_main@plt ls 1 8211741 81.5 180397 85.4
+ >- _dl_fixup ld-2.19.so 1 7607 0.1 108 0.1
+ >- __cxa_atexit libc-2.19.so 1 11737 0.1 10 0.0
+ >- __libc_csu_init ls 1 10354 0.1 10 0.0
+ |- _setjmp libc-2.19.so 1 0 0.0 4 0.0
+ v- main ls 1 8182043 99.6 180254 99.9
+</pre>
+<h3>Points to note:</h3>
+<ul>
+<li>The top level is a command name (comm)</li>
+<li>The next level is a thread (pid:tid)</li>
+<li>Subsequent levels are functions</li>
+<li>'Count' is the number of calls</li>
+<li>'Time' is the elapsed time until the function returns</li>
+<li>Percentages are relative to the level above</li>
+<li>'Branch Count' is the total number of branches for that function and all functions that it calls</li>
+</ul>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds function names by either an exact match or a pattern match.
+The pattern matching symbols are ? for any character and * for zero or more characters.
+<h2 id=allbranches>1.2 All branches</h2>
+The All branches report displays all branches in chronological order.
+Not all data is fetched immediately. More records can be fetched using the Fetch bar provided.
+<h3>Disassembly</h3>
+Open a branch to display disassembly. This only works if:
+<ol>
+<li>The disassembler is available. Currently, only Intel XED is supported - see <a href=#xed>Intel XED Setup</a></li>
+<li>The object code is available. Currently, only the perf build ID cache is searched for object code.
+The default directory ~/.debug can be overridden by setting environment variable PERF_BUILDID_DIR.
+One exception is kcore where the DSO long name is used (refer to dsos_view on the Tables menu),
+or alternatively, set environment variable PERF_KCORE to the kcore file name.</li>
+</ol>
+<h4 id=xed>Intel XED Setup</h4>
+To use Intel XED, libxed.so must be present. To build and install libxed.so:
+<pre>
+git clone https://github.com/intelxed/mbuild.git mbuild
+git clone https://github.com/intelxed/xed
+cd xed
+./mfile.py --share
+sudo ./mfile.py --prefix=/usr/local install
+sudo ldconfig
+</pre>
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<h2 id=selectedbranches>1.3 Selected branches</h2>
+This is the same as the <a href=#allbranches>All branches</a> report but with the data reduced
+by various selection criteria. A dialog box displays available criteria which are AND'ed together.
+<h3>1.3.1 Time ranges</h3>
+The time ranges hint text shows the total time range. Relative time ranges can also be entered in
+ms, us or ns. Also, negative values are relative to the end of trace. Examples:
+<pre>
+ 81073085947329-81073085958238 From 81073085947329 to 81073085958238
+ 100us-200us From 100us to 200us
+ 10ms- From 10ms to the end
+ -100ns The first 100ns
+ -10ms- The last 10ms
+</pre>
+N.B. Due to the granularity of timestamps, there could be no branches in any given time range.
+<h1 id=tables>2. Tables</h1>
+The Tables menu shows all tables and views in the database. Most tables have an associated view
+which displays the information in a more friendly way. Not all data for large tables is fetched
+immediately. More records can be fetched using the Fetch bar provided. Columns can be sorted,
+but that can be slow for large tables.
+<p>There are also tables of database meta-information.
+For SQLite3 databases, the sqlite_master table is included.
+For PostgreSQL databases, information_schema.tables/views/columns are included.
+<h3>Find</h3>
+Ctrl-F displays a Find bar which finds substrings by either an exact match or a regular expression match.
+Refer to Python documentation for the regular expression syntax.
+All columns are searched, but only currently fetched rows are searched.
+<p>N.B. Results are found in id order, so if the table is re-ordered, find-next and find-previous
+will go to the next/previous result in id order, instead of display order.
+"""
+
+# Help window
+
+class HelpWindow(QMdiSubWindow):
+
+ def __init__(self, glb, parent=None):
+ super(HelpWindow, self).__init__(parent)
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setWidget(self.text)
+
+ AddSubWindow(glb.mainwindow.mdi_area, self, "Exported SQL Viewer Help")
+
+# Main window that only displays the help text
+
+class HelpOnlyWindow(QMainWindow):
+
+ def __init__(self, parent=None):
+ super(HelpOnlyWindow, self).__init__(parent)
+
+ self.setMinimumSize(200, 100)
+ self.resize(800, 600)
+ self.setWindowTitle("Exported SQL Viewer Help")
+ self.setWindowIcon(self.style().standardIcon(QStyle.SP_MessageBoxInformation))
+
+ self.text = QTextBrowser()
+ self.text.setHtml(glb_help_text)
+ self.text.setReadOnly(True)
+ self.text.setOpenExternalLinks(True)
+
+ self.setCentralWidget(self.text)
+
# Font resize
def ResizeFont(widget, diff):
self.window_menu = WindowMenu(self.mdi_area, menu)
+ help_menu = menu.addMenu("&Help")
+ help_menu.addAction(CreateAction("&Exported SQL Viewer Help", "Helpful information", self.Help, self, QKeySequence.HelpContents))
+
def Find(self):
win = self.mdi_area.activeSubWindow()
if win:
if event == "branches":
label = "All branches" if branches_events == 1 else "All branches " + "(id=" + dbid + ")"
reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewBranchView(x), self))
+ label = "Selected branches" if branches_events == 1 else "Selected branches " + "(id=" + dbid + ")"
+ reports_menu.addAction(CreateAction(label, "Create a new window displaying branch events", lambda x=dbid: self.NewSelectedBranchView(x), self))
def TableMenu(self, tables, menu):
table_menu = menu.addMenu("&Tables")
def NewBranchView(self, event_id):
BranchWindow(self.glb, event_id, "", "", self)
+ def NewSelectedBranchView(self, event_id):
+ dialog = SelectedBranchDialog(self.glb, self)
+ ret = dialog.exec_()
+ if ret:
+ BranchWindow(self.glb, event_id, dialog.name, dialog.where_clause, self)
+
def NewTableView(self, table_name):
TableWindow(self.glb, table_name, self)
+ def Help(self):
+ HelpWindow(self.glb, self)
+
# XED Disassembler
class xed_state_t(Structure):
class LibXED():
def __init__(self):
- self.libxed = CDLL("libxed.so")
+ try:
+ self.libxed = CDLL("libxed.so")
+ except OSError:
+ self.libxed = None
+ if not self.libxed:
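+ # not on the default search path; try the /usr/local prefix used by
+ # the XED build instructions in the help text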
+ self.libxed = CDLL("/usr/local/lib/libxed.so")
self.xed_tables_init = self.libxed.xed_tables_init
self.xed_tables_init.restype = None
def Main():
if (len(sys.argv) < 2):
- print >> sys.stderr, "Usage is: exported-sql-viewer.py <database name>"
+ print >> sys.stderr, "Usage is: exported-sql-viewer.py {<database name> | --help-only}"
raise Exception("Too few arguments")
dbname = sys.argv[1]
+ if dbname == "--help-only":
+ app = QApplication(sys.argv)
+ mainwindow = HelpOnlyWindow()
+ mainwindow.show()
+ err = app.exec_()
+ sys.exit(err)
is_sqlite3 = False
try:
sample_period=0
freq=0
write_backward=0
-sample_id_all=0
leader->forced_leader = true;
}
}
+
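+/*
+ * A weak group (the 'W' event modifier) falls back to opening its events
+ * individually when the group as a whole cannot be scheduled: close the
+ * members that were already opened and make every member its own leader.
+ */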
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list,
+ struct perf_evsel *evsel)
+{
+ struct perf_evsel *c2, *leader;
+ bool is_open = true;
+
+ leader = evsel->leader;
+ pr_debug("Weak group for %s/%d failed\n",
+ leader->name, leader->nr_members);
+
+ /*
+ * for_each_group_member doesn't work here because it doesn't
+ * include the first entry.
+ */
+ evlist__for_each_entry(evsel_list, c2) {
+ if (c2 == evsel)
+ is_open = false;
+ if (c2->leader == leader) {
+ if (is_open)
+ perf_evsel__close(c2);
+ c2->leader = c2;
+ c2->nr_members = 0;
+ }
+ }
+ return leader;
+}
void perf_evlist__force_leader(struct perf_evlist *evlist);
+struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
#endif /* __PERF_EVLIST_H */
attr->sample_freq = 0;
attr->sample_period = 0;
attr->write_backward = 0;
- attr->sample_id_all = 0;
}
if (opts->no_samples)
decoder->have_calc_cyc_to_tsc = false;
intel_pt_calc_cyc_to_tsc(decoder, true);
}
+
+ intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder)
decoder->timestamp = timestamp;
decoder->timestamp_insn_cnt = 0;
+
+ intel_pt_log_to("Setting timestamp", decoder->timestamp);
}
/* Walk PSB+ packets when already in sync. */
static char log_name[MAX_LOG_NAME];
bool intel_pt_enable_logging;
+void *intel_pt_log_fp(void)
+{
+ return f;
+}
+
void intel_pt_log_enable(void)
{
intel_pt_enable_logging = true;
struct intel_pt_pkt;
+void *intel_pt_log_fp(void);
void intel_pt_log_enable(void);
void intel_pt_log_disable(void);
void intel_pt_log_set_name(const char *name);
intel_pt_dump(pt, buf, len);
}
+static void intel_pt_log_event(union perf_event *event)
+{
+ FILE *f = intel_pt_log_fp();
+
+ if (!intel_pt_enable_logging || !f)
+ return;
+
+ perf_event__fprintf(event, f);
+}
+
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
struct auxtrace_buffer *b)
{
event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
err = intel_pt_context_switch(pt, event, sample);
- intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
- perf_event__name(event->header.type), event->header.type,
- sample->cpu, sample->time, timestamp);
+ intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
+ event->header.type, sample->cpu, sample->time, timestamp);
+ intel_pt_log_event(event);
return err;
}
if (!is_arm_pmu_core(name)) {
pname = pe->pmu ? pe->pmu : "cpu";
- if (strncmp(pname, name, strlen(pname)))
+ if (strcmp(pname, name))
continue;
}