Felix Moeller <felix@derklecks.de>
Filipe Lautert <filipe@icewall.org>
Franck Bui-Huu <vagabon.xyz@gmail.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
Frank Zago <fzago@systemfabricworks.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
The ARC HS can be configured with a pipeline performance monitor for counting
CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters.
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
It also supports overflow interrupts.
Required properties:
The ARC700 can be configured with a pipeline performance monitor for counting
CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters
+are 100+ hardware conditions dynamically mapped to up to 32 counters
Note that:
* The ARC 700 PCT does not support interrupts; although HW events may be
Required properties :
- reg : Offset and length of the register set for the device
- - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
- "rockchip,rk3288-i2c".
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
+ "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
- interrupts : interrupt number
- clocks : parent clock
"Zone Order" orders the zonelists by zone type, then by node within each
zone. Specify "[Zz]one" for zone order.
-Specify "[Dd]efault" to request automatic configuration. Autoconfiguration
-will select "node" order in following case.
-(1) if the DMA zone does not exist or
-(2) if the DMA zone comprises greater than 50% of the available memory or
-(3) if any node's DMA zone comprises greater than 70% of its local memory and
- the amount of local memory is big enough.
-
-Otherwise, "zone" order will be selected. Default order is recommended unless
-this is causing problems for your system/application.
+Specify "[Dd]efault" to request automatic configuration.
+
+On 32-bit, the Normal zone needs to be preserved for allocations accessible
+by the kernel, so "zone" order will be selected.
+
+On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
+order will be selected.
+
+Default order is recommended unless this is causing problems for your
+system/application.
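
As an illustration (not part of the patch), a minimal C sketch that selects
an explicit ordering at run time by writing the numa_zonelist_order sysctl;
the procfs path is the conventional location for this tunable:

	#include <stdio.h>

	int main(void)
	{
		/* Request "node" ordering; writing "default" instead asks the
		 * kernel to autoconfigure as described above. */
		FILE *f = fopen("/proc/sys/vm/numa_zonelist_order", "w");

		if (!f) {
			perror("numa_zonelist_order");
			return 1;
		}
		fputs("node\n", f);
		fclose(f);
		return 0;
	}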
==============================================================
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Or Gerlitz <ogerlitz@mellanox.com>
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
M: Roi Dayan <roid@mellanox.com>
L: linux-rdma@vger.kernel.org
S: Supported
F: drivers/infiniband/ulp/iser/
ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
L: linux-rdma@vger.kernel.org
L: target-devel@vger.kernel.org
F: mm/kmemleak-test.c
KPROBES
-M: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
VERSION = 4
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Blurry Fish Butt
+EXTRAVERSION = -rc6
+NAME = Charred Weasel
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_GENERIC_DMA_COHERENT
config MIGHT_HAVE_PCI
bool
#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT 4
+
+#define CLRI_STATUS_E_MASK 0xF
+#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
+
#define AUX_USER_SP 0x00D
#define AUX_IRQ_CTRL 0x00E
#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
:
: "memory");
+	/* To be compatible with irq_save()/irq_restore(),
+	 * encode the irq bits as expected by CLRI/SETI
+	 * (this was needed to make CONFIG_TRACE_IRQFLAGS work).
+	 */
+ temp = (1 << 5) |
+ ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+ (temp & CLRI_STATUS_E_MASK);
return temp;
}
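
For clarity, a self-contained C sketch (illustration only, not part of the
patch) of the re-encoding the hunk above performs; the STATUS_IE_BIT value
is an assumption for the demo, the real position comes from the ARC headers:

	#include <assert.h>

	#define STATUS_IE_BIT		31	/* assumed for illustration */
	#define STATUS_IE_MASK		(1UL << STATUS_IE_BIT)
	#define CLRI_STATUS_IE_BIT	4
	#define CLRI_STATUS_E_MASK	0xF
	#define CLRI_STATUS_IE_MASK	(1 << CLRI_STATUS_IE_BIT)

	/* Mirror of the arch_local_save_flags() change above: bit 5 set, IE
	 * folded into bit 4, low 4 priority-enable bits preserved. */
	static unsigned long encode_clri(unsigned long status32)
	{
		return (1 << 5) |
		       ((!!(status32 & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
		       (status32 & CLRI_STATUS_E_MASK);
	}

	int main(void)
	{
		/* With IE set, the encoded form reports interrupts enabled... */
		assert(encode_clri(STATUS_IE_MASK | 0x3) & CLRI_STATUS_IE_MASK);
		/* ...and with IE clear, arch_irqs_disabled_flags() would say
		 * interrupts are disabled. */
		assert(!(encode_clri(0x3) & CLRI_STATUS_IE_MASK));
		return 0;
	}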
*/
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return !(flags & (STATUS_IE_MASK));
+ return !(flags & CLRI_STATUS_IE_MASK);
}
static inline int arch_irqs_disabled(void)
#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
.macro IRQ_DISABLE scratch
clri
+ TRACE_ASM_IRQ_DISABLE
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
seti
.endm
clri ; To make status32.IE agree with CPU internal state
- lr r0, [ICAUSE]
+#ifdef CONFIG_TRACE_IRQFLAGS
+ TRACE_ASM_IRQ_DISABLE
+#endif
+ lr r0, [ICAUSE]
mov blink, ret_from_exception
b.d arch_do_IRQ
.Lrestore_regs:
+	# Interrupts are actually disabled from this point on, but will get
+	# re-enabled after we return from interrupt/exception.
+	# But the irq tracer needs to be told now...
+ TRACE_ASM_IRQ_ENABLE
+
ld r0, [sp, PT_status32] ; U/K mode at time of entry
lr r10, [AUX_IRQ_ACT]
.Lrestore_regs:
+	# Interrupts are actually disabled from this point on, but will get
+	# re-enabled after we return from interrupt/exception.
+	# But the irq tracer needs to be told now...
TRACE_ASM_IRQ_ENABLE
lr r10, [status32]
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
#endif
+#include <linux/of_fdt.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/highmem.h>
memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
memblock_dump_all();
/*----------------- node/zones setup --------------------------*/
"=r" (charcnt), /* %1 Output */
"=r" (dwordcnt), /* %2 Output */
"=r" (fill8reg), /* %3 Output */
- "=r" (wrkrega) /* %4 Output */
+ "=&r" (wrkrega) /* %4 Output only */
: "r" (c), /* %5 Input */
"0" (s), /* %0 Input/Output */
"1" (count) /* %1 Input/Output */
SYSCALL(ni_syscall)
SYSCALL(mlock2)
SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
#include <uapi/asm/unistd.h>
-#define NR_syscalls 380
+#define NR_syscalls 382
#define __NR__exit __NR_exit
#define __NR_membarrier 365
#define __NR_mlock2 378
#define __NR_copy_file_range 379
+#define __NR_preadv2 380
+#define __NR_pwritev2 381
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
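
To exercise the newly wired syscalls, a hedged userspace sketch (not part of
the patch) that invokes preadv2 by raw number on a 64-bit kernel; the
argument layout (offset split into low/high longs, then a flags word) is an
assumption based on the generic preadv2 calling convention:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[64] = { 0 };
		struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
		int fd = open("/etc/hostname", O_RDONLY);	/* any readable file */
		long n;

		if (fd < 0)
			return 1;
		/* 380 == __NR_preadv2 above; offset 0, no RWF_* flags */
		n = syscall(380, fd, &iov, 1, 0L, 0L, 0);
		if (n > 0)
			printf("read %ld bytes: %s", n, buf);
		close(fd);
		return 0;
	}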
spinlock_t list_lock;
struct list_head pgtable_list;
struct list_head gmap_list;
- unsigned long asce_bits;
+ unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context allocates 4K page tables. */
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
- if (mm->context.asce_limit == 0) {
+ switch (mm->context.asce_limit) {
+ case 1UL << 42:
+ /*
+ * forked 3-level task, fall through to set new asce with new
+ * mm->pgd
+ */
+ case 0:
/* context created by exec, set asce limit to 4TB */
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
mm->context.asce_limit = STACK_TOP_MAX;
- } else if (mm->context.asce_limit == (1UL << 31)) {
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ break;
+ case 1UL << 53:
+ /* forked 4-level task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ break;
+ case 1UL << 31:
+ /* forked 2-level compat task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ /* pgd_alloc() did not increase mm->nr_pmds */
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
static inline void set_user_asce(struct mm_struct *mm)
{
- S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+ S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
set_cpu_flag(CIF_ASCE);
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+ S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
return _REGION2_ENTRY_EMPTY;
}
-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+int crst_table_upgrade(struct mm_struct *);
+void crst_table_downgrade(struct mm_struct *);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
- crst_table_downgrade(current->mm, 1UL << 31); \
+ crst_table_downgrade(current->mm); \
execve_tail(); \
} while (0)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte(init_mm.context.asce);
else
__tlb_flush_global();
}
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte_local(init_mm.context.asce);
else
__tlb_flush_local();
}
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, (unsigned long) mm->pgd |
- mm->context.asce_bits);
+ __tlb_flush_asce(mm, mm->context.asce);
else
__tlb_flush_full(mm);
}
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
- S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ S390_lowcore.kernel_asce = init_mm.context.asce;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
if (!(flags & MAP_FIXED))
addr = 0;
if ((addr + len) >= TASK_SIZE)
- return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
+ return crst_table_upgrade(current->mm);
return 0;
}
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
__tlb_flush_local();
}
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
{
unsigned long *table, *pgd;
- unsigned long entry;
- int flush;
- BUG_ON(limit > TASK_MAX_SIZE);
- flush = 0;
-repeat:
+ /* upgrade should only happen from 3 to 4 levels */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
+
spin_lock_bh(&mm->page_table_lock);
- if (mm->context.asce_limit < limit) {
- pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit <= (1UL << 31)) {
- entry = _REGION3_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- } else {
- entry = _REGION2_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 53;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION2;
- }
- crst_table_init(table, entry);
- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->task_size = mm->context.asce_limit;
- table = NULL;
- flush = 1;
- }
+ pgd = (unsigned long *) mm->pgd;
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+ mm->pgd = (pgd_t *) table;
+ mm->context.asce_limit = 1UL << 53;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm->task_size = mm->context.asce_limit;
spin_unlock_bh(&mm->page_table_lock);
- if (table)
- crst_table_free(mm, table);
- if (mm->context.asce_limit < limit)
- goto repeat;
- if (flush)
- on_each_cpu(__crst_table_upgrade, mm, 0);
+
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
{
pgd_t *pgd;
+ /* downgrade should only happen from 3 to 2 levels (compat only) */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
if (current->active_mm == mm) {
clear_user_asce();
__tlb_flush_mm(mm);
}
- while (mm->context.asce_limit > limit) {
- pgd = mm->pgd;
- switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
- case _REGION_ENTRY_TYPE_R2:
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- break;
- case _REGION_ENTRY_TYPE_R3:
- mm->context.asce_limit = 1UL << 31;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_SEGMENT;
- break;
- default:
- BUG();
- }
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->task_size = mm->context.asce_limit;
- crst_table_free(mm, (unsigned long *) pgd);
- }
+
+ pgd = mm->pgd;
+ mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->context.asce_limit = 1UL << 31;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+
if (current->active_mm == mm)
set_user_asce(mm);
}
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
- goto out_clean;
+ goto out;
}
/*
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
- goto out_reg;
+ goto free_dma_table;
}
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
- goto out_reg;
- return 0;
+ goto free_bitmap;
-out_reg:
+ return 0;
+free_bitmap:
+ vfree(zdev->iommu_bitmap);
+ zdev->iommu_bitmap = NULL;
+free_dma_table:
dma_free_cpu_table(zdev->dma_table);
-out_clean:
+ zdev->dma_table = NULL;
+out:
return rc;
}
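
The hunk above converts the error path to the ordered-unwind idiom: each
label undoes only what succeeded before the failure, in reverse order, and
clears the pointers so later teardown sees NULL. A generic sketch (plain C,
hypothetical names) of the same shape:

	#include <stdlib.h>

	static int example_init(void **table, void **bitmap)
	{
		int rc;

		*table = malloc(64);
		if (!*table) {
			rc = -1;
			goto out;
		}
		*bitmap = calloc(1, 64);
		if (!*bitmap) {
			rc = -1;
			goto free_table;	/* undo only the table */
		}
		return 0;

	free_table:
		free(*table);
		*table = NULL;
	out:
		return rc;
	}

	int main(void)
	{
		void *t, *b;

		if (example_init(&t, &b))
			return 1;
		free(b);
		free(t);
		return 0;
	}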
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
# CONFIG_INET_LRO is not set
-CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
#define SUN4V_CHIP_SPARC_M6 0x06
#define SUN4V_CHIP_SPARC_M7 0x07
#define SUN4V_CHIP_SPARC64X 0x8a
+#define SUN4V_CHIP_SPARC_SN 0x8b
#define SUN4V_CHIP_UNKNOWN 0xff
#ifndef __ASSEMBLY__
#define __NR_setsockopt 355
#define __NR_mlock2 356
#define __NR_copy_file_range 357
+#define __NR_preadv2 358
+#define __NR_pwritev2 359
-#define NR_syscalls 358
+#define NR_syscalls 360
/* Bitmask values returned from kern_features system call. */
#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
subcc %g1, %g2, %g1 ! Next cacheline
bge,pt %icc, 1b
nop
- ba,pt %xcc, dcpe_icpe_tl1_common
- nop
+ ba,a,pt %xcc, dcpe_icpe_tl1_common
do_dcpe_tl1_fatal:
sethi %hi(1f), %g7
mov 0x2, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_dcpe_tl1,.-do_dcpe_tl1
.globl do_icpe_tl1
subcc %g1, %g2, %g1
bge,pt %icc, 1b
nop
- ba,pt %xcc, dcpe_icpe_tl1_common
- nop
+ ba,a,pt %xcc, dcpe_icpe_tl1_common
do_icpe_tl1_fatal:
sethi %hi(1f), %g7
mov 0x3, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_icpe_tl1,.-do_icpe_tl1
.type dcpe_icpe_tl1_common,#function
cmp %g2, 0x63
be c_cee
nop
- ba,pt %xcc, c_deferred
+ ba,a,pt %xcc, c_deferred
.size __cheetah_log_error,.-__cheetah_log_error
/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
sparc_pmu_type = "sparc-m7";
break;
+ case SUN4V_CHIP_SPARC_SN:
+ sparc_cpu_type = "SPARC-SN";
+ sparc_fpu_type = "SPARC-SN integrated FPU";
+ sparc_pmu_type = "sparc-sn";
+ break;
+
case SUN4V_CHIP_SPARC64X:
sparc_cpu_type = "SPARC64-X";
sparc_fpu_type = "SPARC64-X integrated FPU";
case SUN4V_CHIP_NIAGARA5:
case SUN4V_CHIP_SPARC_M6:
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
case SUN4V_CHIP_SPARC64X:
rover_inc_table = niagara_iterate_method;
break;
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
- b,pt %xcc, fpdis_exit
- nop
+ ba,a,pt %xcc, fpdis_exit
+
2: andcc %g5, FPRS_DU, %g0
bne,pt %icc, 3f
fzero %f32
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
- ba,pt %xcc, fpdis_exit
- nop
+ ba,a,pt %xcc, fpdis_exit
+
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
fp_other_bounce:
call do_fpother
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size fp_other_bounce,.-fp_other_bounce
.align 32
cmp %g2, 'T'
be,pt %xcc, 70f
cmp %g2, 'M'
+ be,pt %xcc, 70f
+ cmp %g2, 'S'
bne,pn %xcc, 49f
nop
cmp %g2, '7'
be,pt %xcc, 5f
mov SUN4V_CHIP_SPARC_M7, %g4
+ cmp %g2, 'N'
+ be,pt %xcc, 5f
+ mov SUN4V_CHIP_SPARC_SN, %g4
ba,pt %xcc, 49f
nop
subcc %g3, 1, %g3
bne,pt %xcc, 41b
add %g1, 1, %g1
- mov SUN4V_CHIP_SPARC64X, %g4
ba,pt %xcc, 5f
- nop
+ mov SUN4V_CHIP_SPARC64X, %g4
49:
mov SUN4V_CHIP_UNKNOWN, %g4
stxa %g0, [%g7] ASI_DMMU
membar #Sync
- ba,pt %xcc, sun4u_continue
- nop
+ ba,a,pt %xcc, sun4u_continue
sun4v_init:
/* Set ctx 0 */
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
- ba,pt %xcc, niagara_tlb_fixup
- nop
+ ba,a,pt %xcc, niagara_tlb_fixup
sun4u_continue:
BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
- ba,pt %xcc, spitfire_tlb_fixup
- nop
+ ba,a,pt %xcc, spitfire_tlb_fixup
niagara_tlb_fixup:
mov 3, %g2 /* Set TLB type to hypervisor. */
be,pt %xcc, niagara4_patch
nop
cmp %g1, SUN4V_CHIP_SPARC_M7
+ be,pt %xcc, niagara4_patch
+ nop
+ cmp %g1, SUN4V_CHIP_SPARC_SN
be,pt %xcc, niagara4_patch
nop
call hypervisor_patch_cachetlbops
nop
- ba,pt %xcc, tlb_fixup_done
- nop
+ ba,a,pt %xcc, tlb_fixup_done
cheetah_tlb_fixup:
mov 2, %g2 /* Set TLB type to cheetah+. */
call cheetah_patch_cachetlbops
nop
- ba,pt %xcc, tlb_fixup_done
- nop
+ ba,a,pt %xcc, tlb_fixup_done
spitfire_tlb_fixup:
/* Set TLB type to spitfire. */
call %o1
add %sp, (2047 + 128), %o0
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
1: sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
/* Disable STICK_INT interrupts. */
1:
109: or %g7, %lo(109b), %g7
call do_privact
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __do_privact,.-__do_privact
.type do_mna,#function
mov %l5, %o2
call mem_address_unaligned
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_mna,.-do_mna
.type do_lddfmna,#function
mov %l5, %o2
call handle_lddfmna
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_lddfmna,.-do_lddfmna
.type do_stdfmna,#function
mov %l5, %o2
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size do_stdfmna,.-do_stdfmna
.type breakpoint_trap,#function
}
}
+static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
+ void *stc, void *host_controller,
+ struct platform_device *op,
+ int numa_node)
+{
+ sd->iommu = iommu;
+ sd->stc = stc;
+ sd->host_controller = host_controller;
+ sd->op = op;
+ sd->numa_node = numa_node;
+}
+
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus, int devfn)
if (!dev)
return NULL;
+ op = of_find_device_by_node(node);
sd = &dev->dev.archdata;
- sd->iommu = pbm->iommu;
- sd->stc = &pbm->stc;
- sd->host_controller = pbm;
- sd->op = op = of_find_device_by_node(node);
- sd->numa_node = pbm->numa_node;
-
+ pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
+ pbm->numa_node);
sd = &op->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
/* No special bus mastering setup handling */
}
+#ifdef CONFIG_PCI_IOV
+int pcibios_add_device(struct pci_dev *dev)
+{
+ struct pci_dev *pdev;
+
+	/* Add SR-IOV arch-specific initialization here.
+	 * Copy dev_archdata from PF to VF.
+	 */
+ if (dev->is_virtfn) {
+ struct dev_archdata *psd;
+
+ pdev = dev->physfn;
+ psd = &pdev->dev.archdata;
+ pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
+ psd->stc, psd->host_controller, NULL,
+ psd->numa_node);
+ }
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static int __init pcibios_init(void)
{
pci_dfl_cache_line_size = 64 >> 2;
sun4v_patch_2insn_range(&__sun4v_2insn_patch,
&__sun4v_2insn_patch_end);
- if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+ if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN)
sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
&__sun_m7_2insn_patch_end);
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_N2;
}
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M6 ||
sun4v_chip_type == SUN4V_CHIP_SPARC_M7 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC_SN ||
sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
ba,pt %xcc, etraptl1
rd %pc, %g7
- ba,pt %xcc, 2f
- nop
+ ba,a,pt %xcc, 2f
1: ba,pt %xcc, etrap_irq
rd %pc, %g7
mov %l5, %o2
call spitfire_access_error
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_access_error,.-__spitfire_access_error
/* This is the trap handler entry point for ECC correctable
mov %l5, %o2
call spitfire_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
.type __spitfire_data_access_exception,#function
mov %l5, %o2
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_data_access_exception,.-__spitfire_data_access_exception
.type __spitfire_insn_access_exception_tl1,#function
mov %l5, %o2
call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
.type __spitfire_insn_access_exception,#function
mov %l5, %o2
call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
.size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
-/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range
+/*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range
+ .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2
#endif /* CONFIG_COMPAT */
/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
.word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
/*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen
- .word sys_setsockopt, sys_mlock2, sys_copy_file_range
+ .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2
mov %l4, %o1
call bad_trap
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
invoke_utrap:
sllx %g3, 3, %g3
return NULL;
}
+static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct vio_dev *vio_dev = to_vio_dev(dev);
+
+ add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, vio_dev->compat);
+ return 0;
+}
+
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
struct vio_dev *vio_dev = to_vio_dev(dev);
return sprintf(buf, "%s\n", vdev->type);
}
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ const struct vio_dev *vdev = to_vio_dev(dev);
+
+ return sprintf(buf, "vio:T%sS%s\n", vdev->type, vdev->compat);
+}
+
static struct device_attribute vio_dev_attrs[] = {
__ATTR_RO(devspec),
__ATTR_RO(type),
+ __ATTR_RO(modalias),
__ATTR_NULL
};
static struct bus_type vio_bus_type = {
.name = "vio",
.dev_attrs = vio_dev_attrs,
+ .uevent = vio_hotplug,
.match = vio_bus_match,
.probe = vio_device_probe,
.remove = vio_device_remove,
jiffies = jiffies_64;
#endif
+#ifdef CONFIG_SPARC64
+ASSERT((swapper_tsb == 0x0000000000408000), "Error: sparc64 early assembler too large")
+#endif
+
SECTIONS
{
#ifdef CONFIG_SPARC64
rd %pc, %g7
call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
- ba,pt %xcc, rtrap
- nop
+ ba,a,pt %xcc, rtrap
/* Be very careful about usage of the trap globals here.
* You cannot touch %g5 as that has the fault information.
max_phys_bits = 47;
break;
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
default:
/* M7 and later support 52-bit virtual addresses. */
sparc64_va_hole_top = 0xfff8000000000000UL;
*/
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
pagecv_flag = 0x00;
break;
default:
*/
switch (sun4v_chip_type) {
case SUN4V_CHIP_SPARC_M7:
+ case SUN4V_CHIP_SPARC_SN:
page_cache4v_flag = _PAGE_CP_4V;
break;
default:
/*
* AMD Performance Monitor K7 and later.
*/
-static const u64 amd_perfmon_event_map[] =
+static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
case 78: /* 14nm Skylake Mobile */
case 94: /* 14nm Skylake Desktop */
+ case 85: /* 14nm Skylake Server */
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
#define LBR_PLM (LBR_KERNEL | LBR_USER)
-#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */
+#define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP -1 /* LBR filter not supported */
#define LBR_IGN 0 /* ignored */
* The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
* in suppress mode. So LBR_SELECT should be set to
* (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+ * But the 10th bit LBR_CALL_STACK does not operate
+ * in suppress mode.
*/
- reg->config = mask ^ x86_pmu.lbr_sel_mask;
+ reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
(br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
struct dev_ext_attribute *de_attrs;
struct attribute **attrs;
size_t size;
+ u64 reg;
int ret;
long i;
+ if (boot_cpu_has(X86_FEATURE_VMX)) {
+ /*
+ * Intel SDM, 36.5 "Tracing post-VMXON" says that
+ * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
+ * post-VMXON.
+ */
+ rdmsrl(MSR_IA32_VMX_MISC, reg);
+ if (reg & BIT(14))
+ pt_pmu.vmx = true;
+ }
+
attrs = NULL;
for (i = 0; i < PT_CPUID_LEAVES; i++) {
reg |= (event->attr.config & PT_CONFIG_MASK);
+ event->hw.config = reg;
wrmsrl(MSR_IA32_RTIT_CTL, reg);
}
-static void pt_config_start(bool start)
+static void pt_config_stop(struct perf_event *event)
{
- u64 ctl;
+ u64 ctl = READ_ONCE(event->hw.config);
+
+ /* may be already stopped by a PMI */
+ if (!(ctl & RTIT_CTL_TRACEEN))
+ return;
- rdmsrl(MSR_IA32_RTIT_CTL, ctl);
- if (start)
- ctl |= RTIT_CTL_TRACEEN;
- else
- ctl &= ~RTIT_CTL_TRACEEN;
+ ctl &= ~RTIT_CTL_TRACEEN;
wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+ WRITE_ONCE(event->hw.config, ctl);
+
/*
* A wrmsr that disables trace generation serializes other PT
* registers and causes all data packets to be written to memory,
* The below WMB, separating data store and aux_head store matches
* the consumer's RMB that separates aux_head load and data load.
*/
- if (!start)
- wmb();
+ wmb();
}
static void pt_config_buffer(void *buf, unsigned int topa_idx,
if (!ACCESS_ONCE(pt->handle_nmi))
return;
- pt_config_start(false);
+ /*
+ * If VMX is on and PT does not support it, don't touch anything.
+ */
+ if (READ_ONCE(pt->vmx_on))
+ return;
if (!event)
return;
+ pt_config_stop(event);
+
buf = perf_get_aux(&pt->handle);
if (!buf)
return;
}
}
+void intel_pt_handle_vmx(int on)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct perf_event *event;
+ unsigned long flags;
+
+ /* PT plays nice with VMX, do nothing */
+ if (pt_pmu.vmx)
+ return;
+
+ /*
+ * VMXON will clear RTIT_CTL.TraceEn; we need to make
+ * sure to not try to set it while VMX is on. Disable
+ * interrupts to avoid racing with pmu callbacks;
+ * concurrent PMI should be handled fine.
+ */
+ local_irq_save(flags);
+ WRITE_ONCE(pt->vmx_on, on);
+
+ if (on) {
+ /* prevent pt_config_stop() from writing RTIT_CTL */
+ event = pt->handle.event;
+ if (event)
+ event->hw.config = 0;
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
+
/*
* PMU callbacks
*/
struct pt *pt = this_cpu_ptr(&pt_ctx);
struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ if (READ_ONCE(pt->vmx_on))
+ return;
+
if (!buf || pt_buffer_is_full(buf, pt)) {
event->hw.state = PERF_HES_STOPPED;
return;
* see comment in intel_pt_interrupt().
*/
ACCESS_ONCE(pt->handle_nmi) = 0;
- pt_config_start(false);
+
+ pt_config_stop(event);
if (event->hw.state == PERF_HES_STOPPED)
return;
struct pt_pmu {
struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+ bool vmx;
};
/**
* struct pt - per-cpu pt context
* @handle: perf output handle
* @handle_nmi: do handle PT PMI on this cpu, there's an active event
+ * @vmx_on: 1 if VMX is ON on this cpu
*/
struct pt {
struct perf_output_handle handle;
int handle_nmi;
+ int vmx_on;
};
#endif /* __INTEL_PT_H__ */
break;
case 60: /* Haswell */
case 69: /* Haswell-Celeron */
+ case 70: /* Haswell GT3e */
case 61: /* Broadwell */
case 71: /* Broadwell-H */
rapl_cntr_mask = RAPL_IDX_HSW;
static inline void perf_check_microcode(void) { }
#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+ extern void intel_pt_handle_vmx(int on);
+#endif
+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
struct irq_desc *desc;
int cpu, vector;
- BUG_ON(!data->cfg.vector);
+ if (!data->cfg.vector)
+ return;
vector = data->cfg.vector;
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
/* Make changes effective */
wrmsr
- /*
- * And make sure that all the mappings we set up have NX set from
- * the beginning.
- */
- orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
-
enable_paging:
/*
static void kvm_cpu_vmxon(u64 addr)
{
+ intel_pt_handle_vmx(1);
+
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&addr), "m"(addr)
: "memory", "cc");
static void kvm_cpu_vmxoff(void)
{
asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+
+ intel_pt_handle_vmx(0);
}
static void hardware_disable(void)
void x86_configure_nx(void)
{
- /* If disable_nx is set, clear NX on all new mappings going forward. */
- if (disable_nx)
+ if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+ __supported_pte_mask |= _PAGE_NX;
+ else
__supported_pte_mask &= ~_PAGE_NX;
}
static void xen_qlock_kick(int cpu)
{
+ int irq = per_cpu(lock_kicker_irq, cpu);
+
+ /* Don't kick if the target's kicker interrupt is not initialized. */
+ if (irq == -1)
+ return;
+
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}
u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
struct rbd_device *rbd_dev = (struct rbd_device *)data;
int ret;
- if (!rbd_dev)
- return;
-
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
+
+ dout("%s flushing notifies\n", __func__);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
- bool removing;
/*
- * Don't hold the lock while doing disk operations,
- * or lock ordering will conflict with the bdev mutex via:
- * rbd_add() -> blkdev_get() -> rbd_open()
+ * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+ * try to update its size. If REMOVING is set, updating size
+ * is just useless work since the device can't be opened.
*/
- spin_lock_irq(&rbd_dev->lock);
- removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
- spin_unlock_irq(&rbd_dev->lock);
- /*
- * If the device is being removed, rbd_dev->disk has
- * been destroyed, so don't try to update its size
- */
- if (!removing) {
+ if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+ !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
- u64 incompat;
+ u64 unsup;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
if (ret < sizeof (features_buf))
return -ERANGE;
- incompat = le64_to_cpu(features_buf.incompat);
- if (incompat & ~RBD_FEATURES_SUPPORTED)
+ unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+ if (unsup) {
+ rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+ unsup);
return -ENXIO;
+ }
*snap_features = le64_to_cpu(features_buf.features);
return ret;
}
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
ret = rbd_dev_id_get(rbd_dev);
if (ret)
- return ret;
+ goto err_out_unlock;
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
/* Everything's ready. Announce the disk to the world. */
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- add_disk(rbd_dev->disk);
+ up_write(&rbd_dev->header_rwsem);
+ add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+ up_write(&rbd_dev->header_rwsem);
return ret;
}
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
+ down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0)
goto err_out_rbd_dev;
return rc;
err_out_rbd_dev:
+ up_write(&rbd_dev->header_rwsem);
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
return ret;
rbd_dev_header_unwatch_sync(rbd_dev);
- /*
- * flush remaining watch callbacks - these must be complete
- * before the osd_client is shutdown
- */
- dout("%s: flushing notifies", __func__);
- ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
* Don't free anything from rbd_dev->disk until after all
wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
j_cdbs->prev_cpu_wall = cur_wall_time;
- if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
- idle_time = 0;
- } else {
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
- j_cdbs->prev_cpu_idle = cur_idle_time;
- }
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
if (err)
goto skip_tar;
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
mci = i7_dev->mci;
pvt = mci->pvt_info;
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
pvt = mci->pvt_info;
/*
{ NULL_GUID, "", NULL },
};
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ *              final "*" character matches any trailing characters of @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
- char u = var_name[*match];
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
return true;
- /* Case sensitive match */
- if (!c && *match == len)
- return true;
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
- if (c != u)
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
return false;
-
- if (!c)
- return true;
+ }
}
- return true;
}
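
A standalone copy of the rewritten matcher (illustration only), with a few
assertions showing the semantics the rewrite preserves, including that a
too-short @var_name no longer triggers a read past its length:

	#include <assert.h>
	#include <stdbool.h>
	#include <stddef.h>

	static bool variable_matches(const char *var_name, size_t len,
				     const char *match_name, int *match)
	{
		for (*match = 0; ; (*match)++) {
			char c = match_name[*match];

			switch (c) {
			case '*':
				return true;
			case '\0':
				return *match == len;
			default:
				if (*match < len && c == var_name[*match])
					continue;
				return false;
			}
		}
	}

	int main(void)
	{
		int m;

		assert(variable_matches("Boot0000", 8, "Boot*", &m));
		assert(variable_matches("Boot", 4, "Boot*", &m));	/* "*" may match nothing */
		assert(variable_matches("Boot", 4, "Boot", &m));	/* exact match */
		assert(!variable_matches("BootOrder", 9, "Boot", &m));	/* pattern ended early */
		assert(!variable_matches("Bo", 2, "Boot", &m));		/* no read past len */
		return 0;
	}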
bool
return 0;
}
-static void gpio_rcar_irq_bus_lock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_get_sync(&p->pdev->dev);
-}
-
-static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
-
-static int gpio_rcar_irq_request_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- return 0;
-}
-
-static void gpio_rcar_irq_release_resources(struct irq_data *d)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
- struct gpio_rcar_priv *p = gpiochip_get_data(gc);
-
- pm_runtime_put(&p->pdev->dev);
-}
-
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
- int error;
-
- error = pm_runtime_get_sync(&p->pdev->dev);
- if (error < 0)
- return error;
-
- error = pinctrl_request_gpio(chip->base + offset);
- if (error)
- pm_runtime_put(&p->pdev->dev);
-
- return error;
+ return pinctrl_request_gpio(chip->base + offset);
}
static void gpio_rcar_free(struct gpio_chip *chip, unsigned offset)
{
- struct gpio_rcar_priv *p = gpiochip_get_data(chip);
-
pinctrl_free_gpio(chip->base + offset);
- /* Set the GPIO as an input to ensure that the next GPIO request won't
+ /*
+ * Set the GPIO as an input to ensure that the next GPIO request won't
* drive the GPIO pin as an output.
*/
gpio_rcar_config_general_input_output_mode(chip, offset, false);
-
- pm_runtime_put(&p->pdev->dev);
}
static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
}
pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
- irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
- irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
- irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
- irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add_data(gpio_chip, p);
err1:
gpiochip_remove(gpio_chip);
err0:
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
}
gpiochip_remove(&p->gpio_chip);
+ pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
lookup = kmalloc(sizeof(*lookup), GFP_KERNEL);
if (lookup) {
lookup->adev = adev;
- lookup->con_id = con_id;
+ lookup->con_id = kstrdup(con_id, GFP_KERNEL);
list_add_tail(&lookup->node, &acpi_crs_lookup_list);
}
}
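
The kstrdup() above matters because the lookup list outlives the caller's
con_id string. A plain-C sketch (hypothetical names, strdup standing in for
kstrdup) of the dangling-pointer hazard the copy avoids:

	#include <stdlib.h>
	#include <string.h>

	struct lookup { char *con_id; };

	static int lookup_add(struct lookup *l, const char *con_id)
	{
		/* take an owned copy; a stored caller pointer would dangle */
		l->con_id = con_id ? strdup(con_id) : NULL;
		if (con_id && !l->con_id)
			return -1;	/* allocation failure */
		return 0;
	}

	int main(void)
	{
		struct lookup l;
		char tmp[8];

		strcpy(tmp, "gpios");
		if (lookup_add(&l, tmp))
			return 1;
		tmp[0] = '\0';	/* caller reuses its buffer; our copy is safe */
		free(l.con_id);
		return 0;
	}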
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
- return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* amdgpu_atpx_call - call an ATPX method
*
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+ if (amdgpu_device_is_px(ddev))
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v7_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
#define mmMC_SEQ_MISC0_FIJI 0xA71
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
goto fail;
}
+ /*
+ * Set the GPU linear window to be at the end of the DMA window, where
+ * the CMA area is likely to reside. This ensures that we are able to
+ * map the command buffers while having the linear window overlap as
+ * much RAM as possible, so we can optimize mappings for other buffers.
+ *
+ * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+ * to different views of the memory on the individual engines.
+ */
+ if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+ (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+ u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+ if (dma_mask < PHYS_OFFSET + SZ_2G)
+ gpu->memory_base = PHYS_OFFSET;
+ else
+ gpu->memory_base = dma_mask - SZ_2G + 1;
+ }
+
ret = etnaviv_hw_reset(gpu);
if (ret)
goto fail;
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
- u32 dma_mask;
int err = 0;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- /*
- * Set the GPU linear window to be at the end of the DMA window, where
- * the CMA area is likely to reside. This ensures that we are able to
- * map the command buffers while having the linear window overlap as
- * much RAM as possible, so we can optimize mappings for other buffers.
- */
- dma_mask = (u32)dma_get_required_mask(dev);
- if (dma_mask < PHYS_OFFSET + SZ_2G)
- gpu->memory_base = PHYS_OFFSET;
- else
- gpu->memory_base = dma_mask - SZ_2G + 1;
-
/* Map registers: */
gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
if (IS_ERR(gpu->mmio))
WREG32(VM_CONTEXT1_CNTL, 0);
}
+static const unsigned ni_dig_offsets[] =
+{
+ NI_DIG0_REGISTER_OFFSET,
+ NI_DIG1_REGISTER_OFFSET,
+ NI_DIG2_REGISTER_OFFSET,
+ NI_DIG3_REGISTER_OFFSET,
+ NI_DIG4_REGISTER_OFFSET,
+ NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+ EVERGREEN_DP0_REGISTER_OFFSET,
+ EVERGREEN_DP1_REGISTER_OFFSET,
+ EVERGREEN_DP2_REGISTER_OFFSET,
+ EVERGREEN_DP3_REGISTER_OFFSET,
+ EVERGREEN_DP4_REGISTER_OFFSET,
+ EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumes EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * We go from crtc to connector, which is not reliable since the mapping
+ * really runs in the opposite direction. If the crtc is enabled, find
+ * the dig_fe which selects this crtc and ensure that it is enabled.
+ * If such a dig_fe is found, find the dig_be which selects the found
+ * dig_fe and ensure that it is enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1 is enabled, we should disconnect the timing
+ * from the DP symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+ unsigned crtc_id, unsigned *ret_dig_fe)
+{
+ unsigned i;
+ unsigned dig_fe;
+ unsigned dig_be;
+ unsigned dig_en_be;
+ unsigned uniphy_pll;
+ unsigned digs_fe_selected;
+ unsigned dig_be_mode;
+ unsigned dig_fe_mask;
+ bool is_enabled = false;
+ bool found_crtc = false;
+
+ /* loop through all running dig_fe to find selected crtc */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+ /* found running pipe */
+ found_crtc = true;
+ dig_fe_mask = 1 << i;
+ dig_fe = i;
+ break;
+ }
+ }
+
+ if (found_crtc) {
+ /* loop through all running dig_be to find selected dig_fe */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+			/* is our dig_fe selected by this dig_be? */
+ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+ if (dig_fe_mask & digs_fe_selected &&
+		    /* is dig_be in SST mode? */
+ dig_be_mode == NI_DIG_BE_DPSST) {
+ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+ ni_dig_offsets[i]);
+ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+ ni_tx_offsets[i]);
+				/* dig_be enabled and tx running */
+ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+ is_enabled = true;
+ *ret_dig_fe = dig_fe;
+ break;
+ }
+ }
+ }
+ }
+
+ return is_enabled;
+}
+
+/*
+ * Blank the dig when in DP SST mode.
+ * The dig ignores the crtc timing.
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+ unsigned dig_fe)
+{
+ unsigned stream_ctrl;
+ unsigned fifo_ctrl;
+ unsigned counter = 0;
+
+ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+		DRM_ERROR("dig %d should be enabled\n", dig_fe);
+ return;
+ }
+
+	stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+ msleep(1);
+ counter++;
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ }
+	if (counter >= 32)
+ DRM_ERROR("counter exceeds %d\n", counter);
+
+ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
+ unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
break;
udelay(1);
}
-
+	/*
+	 * We should disable the dig if it drives DP SST, but we are in
+	 * radeon_device_init and the topology is unknown; it only becomes
+	 * available after radeon_modeset_init. The following method,
+	 * radeon_atom_encoder_dpms_dig, would do the job if we initialized
+	 * it properly; for now we do it manually.
+	 */
+ if (ASIC_IS_DCE5(rdev) &&
+ evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+ evergreen_blank_dp_output(rdev, dig_fe);
+	/* we could remove the 6 lines below */
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
+
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x)	(((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x)			(((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL 0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
+# define NI_DIG_BE_DPSST 0
/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS		(1 << 16)
+#define EVERGREEN_DP_STEER_FIFO 0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1	(0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1	(0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
+
#endif
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
+ int put_count = 0;
lockdep_assert_held(&bo->resv->lock.base);
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- list_del_init(&bo->swap);
- list_del_init(&bo->lru);
-
- } else {
- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
- man = &bdev->man[bo->mem.mem_type];
- list_move_tail(&bo->lru, &man->lru);
- }
+ put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
return 0;
}
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.enable = virtio_gpu_crtc_enable,
.disable = virtio_gpu_crtc_disable,
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_check = virtio_gpu_crtc_atomic_check,
+ .atomic_flush = virtio_gpu_crtc_atomic_flush,
};
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
&vmw_cmd_dx_cid_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
- &vmw_cmd_ok, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
true, false, true),
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
- mode->hdisplay *
- (var->bits_per_pixel + 7) / 8,
- mode->vdisplay)) {
+ mode->hdisplay *
+ DIV_ROUND_UP(var->bits_per_pixel, 8),
+ mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
config I2C_XLP9XX
tristate "XLP9XX I2C support"
- depends on CPU_XLP || COMPILE_TEST
+ depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
- the Broadcom XLP9xx/XLP5xx MIPS processors.
+ the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
This driver can also be built as a module. If so, the module will
be called i2c-xlp9xx.
cbd_t __iomem *rbase;
u_char *txbuf[CPM_MAXBD];
u_char *rxbuf[CPM_MAXBD];
- u32 txdma[CPM_MAXBD];
- u32 rxdma[CPM_MAXBD];
+ dma_addr_t txdma[CPM_MAXBD];
+ dma_addr_t rxdma[CPM_MAXBD];
};
static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
return -EIO;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (i = 0; i < num; i++, msgs++) {
stop = (i == num - 1);
}
out:
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
return ret;
}
return -ENOENT;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
platform_set_drvdata(pdev, i2c);
+ clk_disable(i2c->clk);
+
+ return 0;
+
err_clk:
clk_disable_unprepare(i2c->clk);
return ret;
i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
return 0;
}
i2c->suspended = 1;
+ clk_unprepare(i2c->clk);
+
return 0;
}
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
int ret = 0;
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
ret = exynos5_hsi2c_clock_setup(i2c);
if (ret) {
}
exynos5_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
};
static const struct of_device_id rk3x_i2c_match[] = {
{ .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
{ .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+ { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
{ .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
{},
};
NULL);
/* Couldn't find default GID location */
- WARN_ON(ix < 0);
+ if (WARN_ON(ix < 0))
+ goto release;
zattr_type.gid_type = gid_type;
#include <asm/uaccess.h>
+#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
#include <asm/uaccess.h>
+#include <rdma/ib.h>
+
#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
int srcu_key;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (count < sizeof hdr)
return -EINVAL;
void ib_drain_qp(struct ib_qp *qp)
{
ib_drain_sq(qp);
- ib_drain_rq(qp);
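+	/* A QP attached to an SRQ has no receive queue of its own to drain. */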
+ if (!qp->srq)
+ ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_va) {
+ if (user && !cq->bar2_pa) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
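+	/* T4 kernel-mode queues use the legacy doorbell mechanism, so there is no BAR2 kernel address to return. */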
+ if (is_t4(rdev->lldi.adapter_type))
+ return NULL;
+
return rdev->bar2_kva + bar2_qoffset;
}
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
void c4iw_drain_sq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
- wait_for_completion(&qp->sq_drained);
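+	/* sq_drained is only completed when an outstanding WR finishes; if the SQ is already empty there is nothing to wait for. */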
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_sq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
+
+ if (need_to_wait)
+ wait_for_completion(&qp->sq_drained);
}
void c4iw_drain_rq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
+
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_rq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
- wait_for_completion(&qp->rq_drained);
+ if (need_to_wait)
+ wait_for_completion(&qp->rq_drained);
}
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
* skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
- if (!netif_carrier_ok(netdev))
- return NETDEV_TX_OK;
-
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
#include <linux/export.h>
#include <linux/uio.h>
+#include <rdma/ib.h>
+
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
spin_unlock_irqrestore(&qp->s_hlock, flags);
if (nreq) {
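+		/* call_send means the SQ was idle, so the post may invoke do_send() directly; otherwise hand off to the send engine. */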
if (call_send)
- rdi->driver_f.schedule_send_no_lock(qp);
- else
rdi->driver_f.do_send(qp);
+ else
+ rdi->driver_f.schedule_send_no_lock(qp);
}
return err;
}
printk(KERN_INFO "%s: %s found\n", __func__,
usbvision_device_data[model].model_string);
- /*
- * this is a security check.
- * an exploit using an incorrect bInterfaceNumber is known
- */
- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
- return -ENODEV;
-
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
else if (ifnum < dev->actconfig->desc.bNumInterfaces)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- int nonblocking)
+ void *pb, int nonblocking)
{
unsigned long flags;
int ret;
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
- * Verifying planes is NOT necessary since it already has been checked
- * before the buffer is queued/prepared. So it can never fail.
*/
- list_del(&(*vb)->done_entry);
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
struct vb2_buffer *vb = NULL;
int ret;
- ret = __vb2_get_done_vb(q, &vb, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
if (ret < 0)
return ret;
if (!vb2_is_streaming(q) || q->error)
return POLLERR;
+ /*
+ * If this quirk is set and QBUF hasn't been called yet then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ * This quirk is set by V4L2 for backwards compatibility reasons.
+ */
+ if (q->quirk_poll_must_check_waiting_for_buffers &&
+ q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+ return POLLERR;
+
/*
* For output streams you can call write() as long as there are fewer
* buffers queued than there are buffers available.
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start, nr, write, 1, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
return 0;
}
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+ return __verify_planes_array(vb, pb);
+}
+
/**
* __verify_length() - Verify that the bytesused value for each plane fits in
* the plane length and that the data offset doesn't exceed the bytesused value.
}
static const struct vb2_buf_ops v4l2_buf_ops = {
+ .verify_planes_array = __verify_planes_array_core,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.copy_timestamp = __copy_timestamp,
q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
== V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ q->quirk_poll_must_check_waiting_for_buffers = true;
return vb2_core_queue_init(q);
}
poll_wait(file, &fh->wait, wait);
}
- /*
- * For compatibility with vb1: if QBUF hasn't been called yet, then
- * return POLLERR as well. This only affects capture queues, output
- * queues will always initialize waiting_for_buffers to false.
- */
- if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
- return POLLERR;
-
return res | vb2_core_poll(q, file, wait);
}
EXPORT_SYMBOL_GPL(vb2_poll);
cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ if (cxl_ops->irq_wait)
+ cxl_ops->irq_wait(ctx);
+
/* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
put_pid(ctx->glpid);
#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
+#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
u64 dsisr, u64 errstat);
irqreturn_t (*psl_interrupt)(int irq, void *data);
int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+ void (*irq_wait)(struct cxl_context *ctx);
int (*attach_process)(struct cxl_context *ctx, bool kernel,
u64 wed, u64 amr);
int (*detach_process)(struct cxl_context *ctx);
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
- irq_dispose_mapping(virq);
}
int cxl_register_one_irq(struct cxl *adapter,
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>
return fail_psl_irq(afu, &irq_info);
}
+void native_irq_wait(struct cxl_context *ctx)
+{
+ u64 dsisr;
+ int timeout = 1000;
+ int ph;
+
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ while (timeout--) {
+ ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+ if (ph != ctx->pe)
+ return;
+ dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+ if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+ return;
+ /*
+ * We are waiting for the workqueue to process our
+ * irq, so need to let that run here.
+ */
+ msleep(1);
+ }
+
+ dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
+ " DSISR %016llx!\n", ph, dsisr);
+ return;
+}
+
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
.handle_psl_slice_error = native_handle_psl_slice_error,
.psl_interrupt = NULL,
.ack_irq = native_ack_irq,
+ .irq_wait = native_irq_wait,
.attach_process = native_attach_process,
.detach_process = native_detach_process,
.support_attributes = native_support_attributes,
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
depends on MMC_SDHCI && ACPI
+ select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
identified by ACPI Compatibility ID PNP0D40 or specific
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
#include "sdhci.h"
enum {
.ops = &sdhci_acpi_ops_int,
};
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
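+	/* Intel Baytrail: family 6, model 0x37 (Silvermont) */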
+ static const struct x86_cpu_id byt[] = {
+ { X86_VENDOR_INTEL, 6, 0x37 },
+ {}
+ };
+
+ return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+ u32 val = 0;
+
+ if (!sdhci_acpi_byt())
+ return;
+
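+	/* Read OCP_NETCTRL0 over the IOSF sideband and clear its timeout-base field if set. */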
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
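+	/* The workaround needs the IOSF sideband driver; defer probing until it has loaded. */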
+ if (!sdhci_acpi_byt())
+ return false;
+
+ if (!iosf_mbi_available())
+ return true;
+
+ sdhci_acpi_byt_setting(dev);
+
+ return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
+ if (sdhci_acpi_byt_defer(dev))
+ return -EPROBE_DEFER;
+
hid = acpi_device_hid(device);
uid = device->pnp.unique_id;
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_resume_host(c->host);
}
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_runtime_resume_host(c->host);
}
MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+ /* TODO MMC DDR is not working on A80 */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun9i-a80-mmc"))
+ mmc->caps &= ~MMC_CAP_1_8V_DDR;
+
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
unsigned int nq0 = adap2pinfo(adap, 0)->nqsets;
unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1;
u8 cpus[SGE_QSETS + 1];
- u16 rspq_map[RSS_TABLE_SIZE];
+ u16 rspq_map[RSS_TABLE_SIZE + 1];
for (i = 0; i < SGE_QSETS; ++i)
cpus[i] = i;
rspq_map[i] = i % nq0;
rspq_map[i + RSS_TABLE_SIZE / 2] = (i % nq1) + nq0;
}
+ rspq_map[RSS_TABLE_SIZE] = 0xffff; /* terminator */
t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN |
F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN |
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/of_net.h>
#include "smsc75xx.h"
#define SMSC_CHIPNAME "smsc75xx"
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
+ const u8 *mac_addr;
+
+ /* maybe the boot loader passed the MAC address in devicetree */
+ mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ return;
+ }
+
/* try reading mac address from EEPROM */
if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
dev->net->dev_addr) == 0) {
}
}
- /* no eeprom, or eeprom values are invalid. generate random MAC */
+ /* no useful static MAC address found. generate a random one */
eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
#include <linux/crc32.h>
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
+#include <linux/of_net.h>
#include "smsc95xx.h"
#define SMSC_CHIPNAME "smsc95xx"
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
+ const u8 *mac_addr;
+
+ /* maybe the boot loader passed the MAC address in devicetree */
+ mac_addr = of_get_mac_address(dev->udev->dev.of_node);
+ if (mac_addr) {
+ memcpy(dev->net->dev_addr, mac_addr, ETH_ALEN);
+ return;
+ }
+
/* try reading mac address from EEPROM */
if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
dev->net->dev_addr) == 0) {
}
}
- /* no eeprom, or eeprom values are invalid. generate random MAC */
+ /* no useful static MAC address found. generate a random one */
eth_hw_addr_random(dev->net);
netif_dbg(dev, ifup, dev->net, "MAC address set to eth_random_addr\n");
}
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
-#define HCI_HOTKEY_ENABLE 0x01
+#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
/* Create device class needed by udev */
dev_class = class_create(THIS_MODULE, DRV_NAME);
- if (!dev_class) {
+ if (IS_ERR(dev_class)) {
rmcd_error("Unable to create " DRV_NAME " class");
- return -EINVAL;
+ return PTR_ERR(dev_class);
}
ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
+ unsigned long copied;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
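+	/* copy_from_user() returns the number of bytes left uncopied, so "copied" is how much of the page actually arrived. */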
+ copied = PAGE_SIZE -
+ copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+ if (offsetof(struct sccb_header, length) +
+ sizeof(sccb->length) > copied || sccb->length > copied) {
rc = -EFAULT;
goto out_free;
}
- if (sccb->length > PAGE_SIZE || sccb->length < 8)
- return -EINVAL;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
- rc = -EFAULT;
+ if (sccb->length < 8) {
+ rc = -EINVAL;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
static int vpfe_update_pipe_state(struct vpfe_video_device *video)
{
struct vpfe_pipeline *pipe = &video->pipe;
+ int ret;
- if (vpfe_prepare_pipeline(video))
- return vpfe_prepare_pipeline(video);
+ ret = vpfe_prepare_pipeline(video);
+ if (ret)
+ return ret;
/*
* Find out if there is any input video
*/
if (pipe->input_num == 0) {
pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
- if (vpfe_update_current_ext_subdev(video)) {
+ ret = vpfe_update_current_ext_subdev(video);
+ if (ret) {
pr_err("Invalid external subdev\n");
- return vpfe_update_current_ext_subdev(video);
+ return ret;
}
} else {
pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
struct v4l2_subdev *subdev;
struct v4l2_format format;
struct media_pad *remote;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
sd_fmt.pad = remote->index;
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
/* get output format of remote subdev */
- if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) {
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev,
"invalid remote subdev for video node\n");
- return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ return ret;
}
/* convert to pix format */
mbus.code = sd_fmt.format.code;
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
/* If streaming is started, return error */
return -EBUSY;
}
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
video->fmt = *fmt;
return 0;
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
return 0;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
/*
* If streaming is started return device busy
* error
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
/* Call decoder driver function to set the standard */
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
sdinfo = video->current_ext_subdev;
/* If streaming is started, return device busy error */
if (video->started) {
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
if (video->io_usrs != 0) {
v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- if (vb2_queue_init(q)) {
+ ret = vb2_queue_init(q);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
- return vb2_queue_init(q);
+ return ret;
}
fh->io_allowed = 1;
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
vpfe_stop_capture(video);
ret = vb2_streamoff(&video->buffer_queue, buf_type);
- Remove unneeded file entries in sysfs
- Remove software processing of IB protocol and place in library for use
by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <rdma/ib.h>
+
#include "hfi.h"
#include "pio.h"
#include "device.h"
int uctxt_required = 1;
int must_be_root = 0;
+ /* FIXME: This interface cannot continue out of staging */
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd)) {
ret = -EINVAL;
goto bail;
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd->rcd[uctxt->ctxt] = NULL;
+
+ hfi1_user_exp_rcv_free(fdata);
+ hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
uctxt->rcvwait_to = 0;
uctxt->piowait_to = 0;
uctxt->rcvnowait = 0;
uctxt->pionowait = 0;
uctxt->event_flags = 0;
- hfi1_user_exp_rcv_free(fdata);
- hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
hfi1_stats.sps_ctxts--;
if (++dd->freectxts == dd->num_user_contexts)
aspm_enable_all(dd);
static int user_init(struct file *fp)
{
- int ret;
unsigned int rcvctrl_ops = 0;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
/* make sure that the context has already been setup */
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
- ret = -EFAULT;
- goto done;
- }
-
- /*
- * Subctxts don't need to initialize anything since master
- * has done it.
- */
- if (fd->subctxt) {
- ret = wait_event_interruptible(uctxt->wait, !test_bit(
- HFI1_CTXT_MASTER_UNINIT,
- &uctxt->event_flags));
- goto expected;
- }
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return -EFAULT;
/* initialize poll variables... */
uctxt->urgent = 0;
wake_up(&uctxt->wait);
}
-expected:
- /*
- * Expected receive has to be setup for all processes (including
- * shared contexts). However, it has to be done after the master
- * context has been fully configured as it depends on the
- * eager/expected split of the RcvArray entries.
- * Setting it up here ensures that the subcontexts will be waiting
- * (due to the above wait_event_interruptible() until the master
- * is setup.
- */
- ret = hfi1_user_exp_rcv_init(fp);
-done:
- return ret;
+ return 0;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
int ret = 0;
/*
- * Context should be set up only once (including allocation and
+ * Context should be set up only once, including allocation and
* programming of eager buffers. This is done if context sharing
* is not requested or by the master process.
*/
if (ret)
goto done;
}
+ } else {
+ ret = wait_event_interruptible(uctxt->wait, !test_bit(
+ HFI1_CTXT_MASTER_UNINIT,
+ &uctxt->event_flags));
+ if (ret)
+ goto done;
}
+
ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
+ if (ret)
+ goto done;
+ /*
+ * Expected receive has to be setup for all processes (including
+ * shared contexts). However, it has to be done after the master
+ * context has been fully configured as it depends on the
+ * eager/expected split of the RcvArray entries.
+ * Setting it up here ensures that the subcontexts will be waiting
+	 * (due to the above wait_event_interruptible()) until the master
+	 * is set up.
+ */
+ ret = hfi1_user_exp_rcv_init(fp);
if (ret)
goto done;
{
struct hfi1_devdata *dd = filp->private_data;
- switch (whence) {
- case SEEK_SET:
- break;
- case SEEK_CUR:
- offset += filp->f_pos;
- break;
- case SEEK_END:
- offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
- offset;
- break;
- default:
- return -EINVAL;
- }
-
- if (offset < 0)
- return -EINVAL;
-
- if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
- return -EINVAL;
-
- filp->f_pos = offset;
-
- return filp->f_pos;
+ return fixed_size_llseek(filp, offset, whence,
+ (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
}
/* NOTE: assumes unsigned long is 8 bytes */
struct mm_struct *,
unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+ struct mm_struct *,
unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
rbnode = rb_entry(node, struct mmu_rb_node, node);
rb_erase(node, root);
if (handler->ops->remove)
- handler->ops->remove(root, rbnode, false);
+ handler->ops->remove(root, rbnode, NULL);
}
}
return ret;
}
-/* Caller must host handler lock */
+/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
unsigned long addr,
unsigned long len)
return node;
}
+/* Caller must *not* hold handler lock. */
static void __mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node, bool arg)
+ struct mmu_rb_node *node, struct mm_struct *mm)
{
+ unsigned long flags;
+
/* Validity of handler and node pointers has been checked by caller. */
hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
node->len);
+ spin_lock_irqsave(&handler->lock, flags);
__mmu_int_rb_remove(node, handler->root);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
if (handler->ops->remove)
- handler->ops->remove(handler->root, node, arg);
+ handler->ops->remove(handler->root, node, mm);
}
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
struct mmu_rb_handler *handler = find_mmu_handler(root);
- unsigned long flags;
if (!handler || !node)
return;
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_rb_remove(handler, node, false);
- spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, NULL);
}
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
static inline void mmu_notifier_page(struct mmu_notifier *mn,
struct mm_struct *mm, unsigned long addr)
{
- mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+ mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- mmu_notifier_mem_invalidate(mn, start, end);
+ mmu_notifier_mem_invalidate(mn, mm, start, end);
}
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+ struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
struct rb_root *root = handler->root;
- struct mmu_rb_node *node;
+ struct mmu_rb_node *node, *ptr = NULL;
unsigned long flags;
spin_lock_irqsave(&handler->lock, flags);
- for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
- node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+ for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+ node; node = ptr) {
+ /* Guard against node removal. */
+ ptr = __mmu_int_rb_iter_next(node, start, end - 1);
hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
node->addr, node->len);
- if (handler->ops->invalidate(root, node))
- __mmu_rb_remove(handler, node, true);
+ if (handler->ops->invalidate(root, node)) {
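+			/* __mmu_rb_remove() takes handler->lock itself, so drop it across the call. */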
+ spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, mm);
+ spin_lock_irqsave(&handler->lock, flags);
+ }
}
spin_unlock_irqrestore(&handler->lock, flags);
}
struct mmu_rb_ops {
bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
int (*insert)(struct rb_root *, struct mmu_rb_node *);
- void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+ void (*remove)(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
};
* do the flush work until that QP's
* sdma work has finished.
*/
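+	/* qp->s_flags is protected by s_lock; take it so this flush cannot race with the send engine. */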
+ spin_lock(&qp->s_lock);
if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
+ spin_unlock(&qp->s_lock);
}
/**
static int set_rcvarray_entry(struct file *, unsigned long, u32,
struct tid_group *, struct page **, unsigned);
static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
struct tid_pageset *, unsigned, u16, struct page **,
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_group *grp, *gptr;
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return 0;
/*
 * The notifier would have been removed when the process's mm
* was freed.
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+ mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
continue;
if (HFI1_CAP_IS_USET(TID_UNMAP))
mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu, false);
+ &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root,
&node->mmu);
}
static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
- bool notifier)
+ struct mm_struct *mm)
{
struct hfi1_filedata *fdata =
container_of(root, struct hfi1_filedata, tid_rb_root);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+ unsigned);
static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static struct mmu_rb_ops sdma_rb_ops = {
rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
- if (rb_node)
+ if (rb_node && !IS_ERR(rb_node))
node = container_of(rb_node, struct sdma_mmu_node, rb);
+ else
+ rb_node = NULL;
if (!node) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
goto bail;
}
if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, pinned);
+ unpin_vector_pages(current->mm, pages, node->npages,
+ pinned);
ret = -EFAULT;
goto bail;
}
}
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned npages)
+ unsigned start, unsigned npages)
{
- hfi1_release_user_pages(mm, pages, npages, 0);
+ hfi1_release_user_pages(mm, pages + start, npages, 0);
kfree(pages);
}
&req->pq->sdma_rb_root,
(unsigned long)req->iovs[i].iov.iov_base,
req->iovs[i].iov.iov_len);
- if (!mnode)
+ if (!mnode || IS_ERR(mnode))
continue;
node = container_of(mnode, struct sdma_mmu_node, rb);
}
static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
- bool notifier)
+ struct mm_struct *mm)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
node->pq->n_locked -= node->npages;
spin_unlock(&node->pq->evict_lock);
- unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+ /*
+ * If mm is set, we are being called by the MMU notifier and we
+	 * should not pass a mm_struct to unpin_vector_pages(). This is to
+ * prevent a deadlock when hfi1_release_user_pages() attempts to
+ * take the mmap_sem, which the MMU notifier has already taken.
+ */
+ unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
node->npages);
/*
* If called by the MMU notifier, we have to adjust the pinned
* page count ourselves.
*/
- if (notifier)
- current->mm->pinned_vm -= node->npages;
+ if (mm)
+ mm->pinned_vm -= node->npages;
kfree(node);
}
 * Every step equals (1 * 200) / 255 celsius, and finally needs
 * to be converted to millicelsius.
*/
- return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+ return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
}
static inline long _temp_to_step(long temp)
{
- return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+ return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
}
static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
struct thermal_zone_device *tz = to_thermal_zone(dev); \
\
if (tz->tzp) \
- return sprintf(buf, "%u\n", tz->tzp->name); \
+ return sprintf(buf, "%d\n", tz->tzp->name); \
else \
return -EIO; \
} \
atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
if (atomic_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
- ceph_auth_destroy_authorizer(
- s->s_mdsc->fsc->client->monc.auth,
- s->s_auth.authorizer);
+ ceph_auth_destroy_authorizer(s->s_auth.authorizer);
kfree(s);
}
}
struct ceph_auth_handshake *auth = &s->s_auth;
if (force_new && auth->authorizer) {
- ceph_auth_destroy_authorizer(ac, auth->authorizer);
+ ceph_auth_destroy_authorizer(auth->authorizer);
auth->authorizer = NULL;
}
if (!auth->authorizer) {
spin_unlock(&dlm->spinlock);
+ ret = 0;
+
done:
dlm_put(dlm);
return ret;
return page;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
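+/*
+ * PMD counterpart of can_gather_numa_stats(): return the page backing a
+ * huge pmd when it should be counted in the NUMA stats, or NULL to skip it.
+ */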
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct page *page;
+ int nid;
+
+ if (!pmd_present(pmd))
+ return NULL;
+
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ if (!page)
+ return NULL;
+
+ if (PageReserved(page))
+ return NULL;
+
+ nid = page_to_nid(page);
+ if (!node_isset(nid, node_states[N_MEMORY]))
+ return NULL;
+
+ return page;
+}
+#endif
+
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
pte_t *orig_pte;
pte_t *pte;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
- pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
- page = can_gather_numa_stats(huge_pte, vma, addr);
+ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
if (page)
- gather_stats(page, md, pte_dirty(huge_pte),
+ gather_stats(page, md, pmd_dirty(*pmd),
HPAGE_PMD_SIZE/PAGE_SIZE);
spin_unlock(ptl);
return 0;
if (pmd_trans_unstable(pmd))
return 0;
+#endif
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, vma, addr);
#endif
}
- ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
+ ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32);
if (ret < 0)
goto out_bh;
strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
- ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
+ ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128);
if (ret < 0)
goto out_bh;
uint8_t *, int);
extern int udf_put_filename(struct super_block *, const uint8_t *, int,
uint8_t *, int);
-extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int);
+extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int);
/* ialloc.c */
extern void udf_free_inode(struct inode *);
return u_len;
}
-int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len)
+int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len,
+ const uint8_t *ocu_i, int i_len)
{
- return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len,
+ int s_len = 0;
+
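+	/* A dstring stores the length of its content in its final byte (ECMA-167). */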
+ if (i_len > 0) {
+ s_len = ocu_i[i_len - 1];
+ if (s_len >= i_len) {
+ pr_err("incorrect dstring lengths (%d/%d)\n",
+ s_len, i_len);
+ return -EINVAL;
+ }
+ }
+
+ return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len,
udf_uni2char_utf8, 0);
}
*/
struct ceph_auth_client;
-struct ceph_authorizer;
struct ceph_msg;
+struct ceph_authorizer {
+ void (*destroy)(struct ceph_authorizer *);
+};
+
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
void *authorizer_buf;
struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len);
- void (*destroy_authorizer)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;
-struct ceph_authorizer;
/*
* completion callback for async writepages
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
+ void (*post_attach)(void);
int (*can_fork)(struct task_struct *task);
void (*cancel_fork)(struct task_struct *task);
void (*fork)(struct task_struct *task);
task_unlock(current);
}
-extern void cpuset_post_attach_flush(void);
-
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
return false;
}
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
}
struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
return false;
}
+static inline void put_huge_zero_page(void)
+{
+ BUILD_BUG();
+}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd, int flags)
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
- u8 irq_context;
- u8 depth;
- u16 base;
+ /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ unsigned int irq_context : 2,
+ depth : 6,
+ base : 24;
+ /* 4 byte hole */
struct hlist_node entry;
u64 chain_key;
};
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
page = compound_head(page);
if (atomic_read(compound_mapcount_ptr(page)) >= 0)
return true;
+ if (PageHuge(page))
+ return false;
for (i = 0; i < hpage_nr_pages(page); i++) {
if (atomic_read(&page[i]._mapcount) >= 0)
return true;
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
net_ratelimited_function(pr_warn, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...) \
net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)
-#if defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
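+/* With dynamic debug, emit (rate-limited) only when this callsite's print flag is enabled. */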
+#define net_dbg_ratelimited(fmt, ...) \
+do { \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
+ net_ratelimit()) \
+ __dynamic_pr_debug(&descriptor, fmt, ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
#define net_dbg_ratelimited(fmt, ...) \
net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#else
/**
* struct vb2_ops - driver-specific callbacks
*
+ * @verify_planes_array: Verify that a given user space structure contains
+ * enough planes for the buffer. This is called
+ * for each dequeued buffer.
* @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
* For V4L2 this is a struct v4l2_buffer.
* @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
* the vb2_buffer struct.
*/
struct vb2_buf_ops {
+ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
struct vb2_plane *planes);
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
+ *		has not been called. This is a vb1 idiom that has also been
+ *		adopted by vb2.
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
+ unsigned quirk_poll_must_check_waiting_for_buffers:1;
struct mutex *lock;
void *owner;
#define _RDMA_IB_H
#include <linux/types.h>
+#include <linux/sched.h>
struct ib_addr {
union {
__u64 sib_scope_id;
};
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
#endif /* _RDMA_IB_H */
#ifdef CONFIG_SND_HDA_I915
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
-int snd_hdac_get_display_clk(struct hdac_bus *bus);
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
bool *audio_enabled, char *buffer, int max_bytes);
{
return 0;
}
-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
- return 0;
}
static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
int rate)
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
size_t nbytes, loff_t off, bool threadgroup)
{
struct task_struct *tsk;
+ struct cgroup_subsys *ss;
struct cgroup *cgrp;
pid_t pid;
- int ret;
+ int ssid, ret;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
rcu_read_unlock();
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
+ for_each_subsys(ss, ssid)
+ if (ss->post_attach)
+ ss->post_attach();
cgroup_kn_unlock(of->kn);
- cpuset_post_attach_flush();
return ret ?: nbytes;
}
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
}
}
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
{
flush_workqueue(cpuset_migrate_mm_wq);
}
.can_attach = cpuset_can_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
+ .post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.legacy_cftypes = files,
.early_init = true,
if (ret || !write)
return ret;
- if (sysctl_perf_cpu_time_max_percent == 100) {
+ if (sysctl_perf_cpu_time_max_percent == 100 ||
+ sysctl_perf_cpu_time_max_percent == 0) {
printk(KERN_WARNING
"perf: Dynamic interrupt throttling disabled, can hang your system!\n");
WRITE_ONCE(perf_sample_allowed_ns, 0);
* function.
*
* Lock order:
+ * cred_guard_mutex
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event::child_mutex;
find_lively_task_by_vpid(pid_t vpid)
{
struct task_struct *task;
- int err;
rcu_read_lock();
if (!vpid)
if (!task)
return ERR_PTR(-ESRCH);
- /* Reuse ptrace permission checks for now. */
- err = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
- goto errout;
-
return task;
-errout:
- put_task_struct(task);
- return ERR_PTR(err);
-
}
/*
get_online_cpus();
+ if (task) {
+ err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+ if (err)
+ goto err_cpus;
+
+ /*
+ * Reuse ptrace permission checks for now.
+ *
+ * We must hold cred_guard_mutex across this and any potential
+ * perf_install_in_context() call for this new event to
+ * serialize against exec() altering our credentials (and the
+ * perf_event_exit_task() that could imply).
+ */
+ err = -EACCES;
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+ goto err_cred;
+ }
+
if (flags & PERF_FLAG_PID_CGROUP)
cgroup_fd = pid;
NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
- goto err_cpus;
+ goto err_cred;
}
if (is_sampling_event(event)) {
goto err_context;
}
- if (task) {
- put_task_struct(task);
- task = NULL;
- }
-
/*
* Look up the group leader (we will attach this event to it):
*/
WARN_ON_ONCE(ctx->parent_ctx);
+ /*
+ * This is the point on no return; we cannot fail hereafter. This is
+ * where we start modifying current state.
+ */
+
if (move_group) {
/*
* See perf_event_ctx_lock() for comments on the details
mutex_unlock(&gctx->mutex);
mutex_unlock(&ctx->mutex);
+ if (task) {
+ mutex_unlock(&task->signal->cred_guard_mutex);
+ put_task_struct(task);
+ }
+
put_online_cpus();
mutex_lock(¤t->perf_event_mutex);
*/
if (!event_file)
free_event(event);
+err_cred:
+ if (task)
+ mutex_unlock(&task->signal->cred_guard_mutex);
err_cpus:
put_online_cpus();
err_task:
/*
* When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
*/
void perf_event_exit_task(struct task_struct *child)
{
#define pr_fmt(fmt) "kcov: " fmt
+#define DISABLE_BRANCH_PROFILING
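+/*
+ * kcov instruments the rest of the kernel, so this file must not itself be
+ * branch-profiled or traced: either would recurse into
+ * __sanitizer_cov_trace_pc().
+ */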
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
* Entry point from instrumented code.
* This is called once per basic-block/edge.
*/
-void __sanitizer_cov_trace_pc(void)
+void notrace __sanitizer_cov_trace_pc(void)
{
struct task_struct *t;
enum kcov_mode mode;
VMCOREINFO_OFFSET(page, lru);
VMCOREINFO_OFFSET(page, _mapcount);
VMCOREINFO_OFFSET(page, private);
+ VMCOREINFO_OFFSET(page, compound_dtor);
+ VMCOREINFO_OFFSET(page, compound_order);
+ VMCOREINFO_OFFSET(page, compound_head);
VMCOREINFO_OFFSET(pglist_data, node_zones);
VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#ifdef CONFIG_X86
VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
#endif
-#ifdef CONFIG_HUGETLBFS
- VMCOREINFO_SYMBOL(free_huge_page);
+#ifdef CONFIG_HUGETLB_PAGE
+ VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
#endif
arch_crash_save_vmcoreinfo();
chain->irq_context = hlock->irq_context;
i = get_first_held_lock(curr, hlock);
chain->depth = curr->lockdep_depth + 1 - i;
+
+ BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
+ BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
+ BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
+
if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
chain->base = nr_chain_hlocks;
- nr_chain_hlocks += chain->depth;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = curr->held_locks[i].class_idx - 1;
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
}
+
+ if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
+ nr_chain_hlocks += chain->depth;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+ /*
+ * Important for check_no_collision().
+ */
+ if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+ if (debug_locks_off_graph_unlock())
+ return 0;
+
+ print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+ dump_stack();
+ return 0;
+ }
+#endif
+
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
return 1;
}
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+ return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
static int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
/*
* Keep track of points where we cross into an interrupt context:
*/
- hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
- curr->softirq_context;
if (depth) {
struct held_lock *prev_hlock;
return 1;
}
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+ return 0;
+}
+
static inline int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
+ hlock->irq_context = task_irq_context(curr);
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
int i;
if (v == SEQ_START_TOKEN) {
+ if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
+ seq_printf(m, "(buggered) ");
seq_printf(m, "all lock chains:\n");
return 0;
}
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+ /*
+ * The following mb guarantees that previous clear of a PENDING bit
+ * will not be reordered with any speculative LOADS or STORES from
+ * work->current_func, which is executed afterwards. This possible
+	 * reordering can lead to a missed execution on attempt to queue
+ * the same @work. E.g. consider this case:
+ *
+ * CPU#0 CPU#1
+ * ---------------------------- --------------------------------
+ *
+ * 1 STORE event_indicated
+ * 2 queue_work_on() {
+ * 3 test_and_set_bit(PENDING)
+ * 4 } set_..._and_clear_pending() {
+ * 5 set_work_data() # clear bit
+ * 6 smp_mb()
+ * 7 work->current_func() {
+ * 8 LOAD event_indicated
+ * }
+ *
+ * Without an explicit full barrier speculative LOAD on line 8 can
+ * be executed before CPU#0 does STORE on line 1. If that happens,
+ * CPU#0 observes the PENDING bit is still set and new execution of
+	 * a @work is not queued, in the hope that CPU#1 will eventually
+ * finish the queued @work. Meanwhile CPU#1 does not see
+ * event_indicated is set, because speculative LOAD was executed
+ * before actual STORE.
+ */
+ smp_mb();
}
static void clear_work_data(struct work_struct *work)
goto fast_exit;
hash = hash_stack(trace->entries, trace->nr_entries);
- /* Bad luck, we won't store this stack. */
- if (hash == 0)
- goto exit;
-
bucket = &stack_table[hash & STACK_HASH_MASK];
/*
return READ_ONCE(huge_zero_page);
}
-static void put_huge_zero_page(void)
+void put_huge_zero_page(void)
{
/*
* Counter should never go to zero here. Only shrinker can put
if (vma_is_dax(vma)) {
spin_unlock(ptl);
if (is_huge_zero_pmd(orig_pmd))
- put_huge_zero_page();
+ tlb_remove_page(tlb, pmd_page(orig_pmd));
} else if (is_huge_zero_pmd(orig_pmd)) {
pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
atomic_long_dec(&tlb->mm->nr_ptes);
spin_unlock(ptl);
- put_huge_zero_page();
+ tlb_remove_page(tlb, pmd_page(orig_pmd));
} else {
struct page *page = pmd_page(orig_pmd);
page_remove_rmap(page, true);
* page fault if needed.
*/
return 0;
- if (vma->vm_ops)
+ if (vma->vm_ops || (vm_flags & VM_NO_THP))
/* khugepaged not yet working on file or special mappings */
return 0;
- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
return false;
if (is_vma_temporary_stack(vma))
return false;
- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
- return true;
+ return !(vma->vm_flags & VM_NO_THP);
}
static void collapse_huge_page(struct mm_struct *mm,
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
spinlock_t lock; /* for from, to */
+ struct mm_struct *mm;
struct mem_cgroup *from;
struct mem_cgroup *to;
unsigned long flags;
static void mem_cgroup_clear_mc(void)
{
+ struct mm_struct *mm = mc.mm;
+
/*
* we must clear moving_task before waking up waiters at the end of
* task migration.
spin_lock(&mc.lock);
mc.from = NULL;
mc.to = NULL;
+ mc.mm = NULL;
spin_unlock(&mc.lock);
+
+ mmput(mm);
}
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
VM_BUG_ON(mc.moved_swap);
spin_lock(&mc.lock);
+ mc.mm = mm;
mc.from = from;
mc.to = memcg;
mc.flags = move_flags;
ret = mem_cgroup_precharge_mc(mm);
if (ret)
mem_cgroup_clear_mc();
+ } else {
+ mmput(mm);
}
- mmput(mm);
return ret;
}
return ret;
}
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
{
struct mm_walk mem_cgroup_move_charge_walk = {
.pmd_entry = mem_cgroup_move_charge_pte_range,
- .mm = mm,
+ .mm = mc.mm,
};
lru_add_drain_all();
atomic_inc(&mc.from->moving_account);
synchronize_rcu();
retry:
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
/*
* Someone who is holding the mmap_sem might be waiting in
* waitq. So we cancel all extra charges, wake up all waiters,
* additional charge, the page walk just aborts.
*/
walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
- up_read(&mm->mmap_sem);
+ up_read(&mc.mm->mmap_sem);
atomic_dec(&mc.from->moving_account);
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
- struct cgroup_subsys_state *css;
- struct task_struct *p = cgroup_taskset_first(tset, &css);
- struct mm_struct *mm = get_task_mm(p);
-
- if (mm) {
- if (mc.to)
- mem_cgroup_move_charge(mm);
- mmput(mm);
- }
- if (mc.to)
+ if (mc.to) {
+ mem_cgroup_move_charge();
mem_cgroup_clear_mc();
+ }
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
}
#endif
.css_reset = mem_cgroup_css_reset,
.can_attach = mem_cgroup_can_attach,
.cancel_attach = mem_cgroup_cancel_attach,
- .attach = mem_cgroup_move_task,
+ .post_attach = mem_cgroup_move_task,
.bind = mem_cgroup_bind,
.dfl_cftypes = memory_files,
.legacy_cftypes = mem_cgroup_legacy_files,
}
}
- return get_page_unless_zero(head);
+ if (get_page_unless_zero(head)) {
+ if (head == compound_head(page))
+ return 1;
+
+ pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
+ put_page(head);
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(get_hwpoison_page);
return pfn_to_page(pfn);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd)
+{
+ unsigned long pfn = pmd_pfn(pmd);
+
+ /*
+ * There is no pmd_special() but there may be special pmds, e.g.
+ * in a direct-access (dax) mapping, so let's just replicate the
+ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+ */
+ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ if (!pfn_valid(pfn))
+ return NULL;
+ goto out;
+ } else {
+ unsigned long off;
+ off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+ }
+
+ if (is_zero_pfn(pfn))
+ return NULL;
+ if (unlikely(pfn > highest_memmap_pfn))
+ return NULL;
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+ */
+out:
+ return pfn_to_page(pfn);
+}
+#endif
+
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
/* Soft-offlined page shouldn't go through lru cache list */
- if (reason == MR_MEMORY_FAILURE) {
+ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+ /*
+ * With this release, we free the successfully migrated
+ * page and intentionally set PG_HWPoison on the page
+ * just freed. Although it's rather weird, it's how the
+ * HWPoison flag works at the moment.
+ */
put_page(page);
if (!test_set_page_hwpoison(page))
num_poisoned_pages_inc();
ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
if (!ret) {
- swap_slot_free_notify(page);
+ if (trylock_page(page)) {
+ swap_slot_free_notify(page);
+ unlock_page(page);
+ }
+
count_vm_event(PSWPIN);
return 0;
}
zone = NULL;
}
+ if (is_huge_zero_page(page)) {
+ put_huge_zero_page();
+ continue;
+ }
+
page = compound_head(page);
if (!put_page_testzero(page))
continue;
sc->gfp_mask |= __GFP_HIGHMEM;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- requested_highidx, sc->nodemask) {
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
enum zone_type classzone_idx;
if (!populated_zone(zone))
/* Try to sleep for a short interval */
if (prepare_kswapd_sleep(pgdat, order, remaining,
balanced_classzone_idx)) {
+ /*
+ * Compaction records what page blocks it recently failed to
+ * isolate pages from and skips them in the future scanning.
+ * When kswapd is going to sleep, it is reasonable to assume
+ * that pages and compaction may succeed so reset the cache.
+ */
+ reset_isolation_suitable(pgdat);
+
+ /*
+ * We have freed the memory, now we should compact it to make
+ * allocation of the requested order possible.
+ */
+ wakeup_kcompactd(pgdat, order, classzone_idx);
+
remaining = schedule_timeout(HZ/10);
finish_wait(&pgdat->kswapd_wait, &wait);
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- /*
- * Compaction records what page blocks it recently failed to
- * isolate pages from and skips them in the future scanning.
- * When kswapd is going to sleep, it is reasonable to assume
- * that pages and compaction may succeed so reset the cache.
- */
- reset_isolation_suitable(pgdat);
-
- /*
- * We have freed the memory, now we should compact it to make
- * allocation of the requested order possible.
- */
- wakeup_kcompactd(pgdat, order, classzone_idx);
-
if (!kthread_should_stop())
schedule();
}
EXPORT_SYMBOL(ceph_auth_create_authorizer);
-void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
{
- mutex_lock(&ac->mutex);
- if (ac->ops && ac->ops->destroy_authorizer)
- ac->ops->destroy_authorizer(ac, a);
- mutex_unlock(&ac->mutex);
+ a->destroy(a);
}
EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
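This hunk changes the destruction contract: instead of going through the ceph_auth_client's ops table (and its mutex), each authorizer now carries its own destroy callback. A minimal sketch of the resulting shape, using the names from this diff; the base struct's single-callback layout is an assumption inferred from the code here.

/* base type: just the destructor (assumed layout) */
struct ceph_authorizer {
	void (*destroy)(struct ceph_authorizer *);
};

/* each flavor embeds the base and sets the callback at creation */
struct ceph_none_authorizer {
	struct ceph_authorizer base;	/* au->base.destroy = ... */
	/* ... */
};

/* callers need neither the ceph_auth_client nor its mutex */
void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
{
	a->destroy(a);
}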
struct ceph_auth_none_info *xi = ac->private;
xi->starting = true;
- xi->built_authorizer = false;
}
static void destroy(struct ceph_auth_client *ac)
return xi->starting;
}
+static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
+ struct ceph_none_authorizer *au)
+{
+ void *p = au->buf;
+ void *const end = p + sizeof(au->buf);
+ int ret;
+
+ ceph_encode_8_safe(&p, end, 1, e_range);
+ ret = ceph_entity_name_encode(ac->name, &p, end);
+ if (ret < 0)
+ return ret;
+
+ ceph_encode_64_safe(&p, end, ac->global_id, e_range);
+ au->buf_len = p - (void *)au->buf;
+ dout("%s built authorizer len %d\n", __func__, au->buf_len);
+ return 0;
+
+e_range:
+ return -ERANGE;
+}
+
static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
{
return 0;
return result;
}
+static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
+{
+ kfree(a);
+}
+
/*
- * build an 'authorizer' with our entity_name and global_id. we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
+ * build an 'authorizer' with our entity_name and global_id. it is
+ * identical for all services we connect to.
*/
static int ceph_auth_none_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
{
- struct ceph_auth_none_info *ai = ac->private;
- struct ceph_none_authorizer *au = &ai->au;
- void *p, *end;
+ struct ceph_none_authorizer *au;
int ret;
- if (!ai->built_authorizer) {
- p = au->buf;
- end = p + sizeof(au->buf);
- ceph_encode_8(&p, 1);
- ret = ceph_entity_name_encode(ac->name, &p, end - 8);
- if (ret < 0)
- goto bad;
- ceph_decode_need(&p, end, sizeof(u64), bad2);
- ceph_encode_64(&p, ac->global_id);
- au->buf_len = p - (void *)au->buf;
- ai->built_authorizer = true;
- dout("built authorizer len %d\n", au->buf_len);
+ au = kmalloc(sizeof(*au), GFP_NOFS);
+ if (!au)
+ return -ENOMEM;
+
+ au->base.destroy = ceph_auth_none_destroy_authorizer;
+
+ ret = ceph_auth_none_build_authorizer(ac, au);
+ if (ret) {
+ kfree(au);
+ return ret;
}
auth->authorizer = (struct ceph_authorizer *) au;
auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
return 0;
-
-bad2:
- ret = -ERANGE;
-bad:
- return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
-{
- /* nothing to do */
}
static const struct ceph_auth_client_ops ceph_auth_none_ops = {
.build_request = build_request,
.handle_reply = handle_reply,
.create_authorizer = ceph_auth_none_create_authorizer,
- .destroy_authorizer = ceph_auth_none_destroy_authorizer,
};
int ceph_auth_none_init(struct ceph_auth_client *ac)
return -ENOMEM;
xi->starting = true;
- xi->built_authorizer = false;
ac->protocol = CEPH_AUTH_NONE;
ac->private = xi;
*/
struct ceph_none_authorizer {
+ struct ceph_authorizer base;
char buf[128];
int buf_len;
char reply_buf[0];
struct ceph_auth_none_info {
bool starting;
- bool built_authorizer;
- struct ceph_none_authorizer au; /* we only need one; it's static */
};
int ceph_auth_none_init(struct ceph_auth_client *ac);
return -EAGAIN;
}
+static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
+{
+ struct ceph_x_authorizer *au = (void *)a;
+
+ ceph_x_authorizer_cleanup(au);
+ kfree(au);
+}
+
static int ceph_x_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
if (!au)
return -ENOMEM;
+ au->base.destroy = ceph_x_destroy_authorizer;
+
ret = ceph_x_build_authorizer(ac, th, au);
if (ret) {
kfree(au);
return ret;
}
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
-{
- struct ceph_x_authorizer *au = (void *)a;
-
- ceph_x_authorizer_cleanup(au);
- kfree(au);
-}
-
-
static void ceph_x_reset(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi = ac->private;
.create_authorizer = ceph_x_create_authorizer,
.update_authorizer = ceph_x_update_authorizer,
.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
- .destroy_authorizer = ceph_x_destroy_authorizer,
.invalidate_authorizer = ceph_x_invalidate_authorizer,
.reset = ceph_x_reset,
.destroy = ceph_x_destroy,
struct ceph_x_authorizer {
+ struct ceph_authorizer base;
struct ceph_crypto_key session_key;
struct ceph_buffer *buf;
unsigned int service;
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
if (atomic_dec_and_test(&osd->o_ref)) {
- struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
-
if (osd->o_auth.authorizer)
- ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+ ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
kfree(osd);
}
}
struct ceph_auth_handshake *auth = &o->o_auth;
if (force_new && auth->authorizer) {
- ceph_auth_destroy_authorizer(ac, auth->authorizer);
+ ceph_auth_destroy_authorizer(auth->authorizer);
auth->authorizer = NULL;
}
if (!auth->authorizer) {
const struct sock *sk2,
bool match_wildcard))
{
+ struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
struct sock *sk2;
struct hlist_nulls_node *node;
kuid_t uid = sock_i_uid(sk);
sk2->sk_family == sk->sk_family &&
ipv6_only_sock(sk2) == ipv6_only_sock(sk) &&
sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
+ inet_csk(sk2)->icsk_bind_hash == tb &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
saddr_same(sk, sk2, false))
return reuseport_add_sock(sk, sk2);
return flags;
}
+/* Fills in tpi and returns header length to be pulled. */
static int parse_gre_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
bool *csum_err)
{
return -EINVAL;
}
}
- return iptunnel_pull_header(skb, hdr_len, tpi->proto, false);
+ return hdr_len;
}
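parse_gre_header() now only validates the header and reports its length; pulling is left to the caller. That split lets the ICMP error path (ipgre_err(), below) inspect the GRE header without consuming it, while the receive path pulls it explicitly. The new calling convention, as used later in this patch:

hdr_len = parse_gre_header(skb, &tpi, &csum_err);
if (hdr_len < 0)
	goto drop;
if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false) < 0)
	goto drop;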
static void ipgre_err(struct sk_buff *skb, u32 info,
struct tnl_ptk_info tpi;
bool csum_err = false;
- if (parse_gre_header(skb, &tpi, &csum_err)) {
+ if (parse_gre_header(skb, &tpi, &csum_err) < 0) {
if (!csum_err) /* ignore csum errors. */
return;
}
{
struct tnl_ptk_info tpi;
bool csum_err = false;
+ int hdr_len;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
}
#endif
- if (parse_gre_header(skb, &tpi, &csum_err) < 0)
+ hdr_len = parse_gre_header(skb, &tpi, &csum_err);
+ if (hdr_len < 0)
+ goto drop;
+ if (iptunnel_pull_header(skb, hdr_len, tpi.proto, false) < 0)
goto drop;
if (ipgre_rcv(skb, &tpi) == PACKET_RCVD)
if (!IS_ERR(rt)) {
tdev = rt->dst.dev;
- dst_cache_set_ip4(&tunnel->dst_cache, &rt->dst,
- fl4.saddr);
ip_rt_put(rt);
}
if (dev->type != ARPHRD_ETHER)
dev->flags |= IFF_POINTOPOINT;
+
+ dst_cache_reset(&tunnel->dst_cache);
}
if (!tdev && tunnel->parms.link)
memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
sizeof(udp_conf.peer_ip6));
udp_conf.use_udp6_tx_checksums =
- cfg->udp6_zero_tx_checksums;
+ !cfg->udp6_zero_tx_checksums;
udp_conf.use_udp6_rx_checksums =
- cfg->udp6_zero_rx_checksums;
+ !cfg->udp6_zero_rx_checksums;
} else
#endif
{
int bearer_id = b->identity;
struct tipc_link_entry *le;
u16 bc_ack = msg_bcast_ack(hdr);
+ u32 self = tipc_own_addr(net);
int rc = 0;
__skb_queue_head_init(&xmitq);
return tipc_node_bc_rcv(net, skb, bearer_id);
}
+ /* Discard unicast link messages destined for another node */
+ if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
+ goto discard;
+
/* Locate neighboring node that sent packet */
n = tipc_node_find(net, msg_prevnode(hdr));
if (unlikely(!n))
*/
void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
{
- struct hdac_stream *s;
+ struct hdac_stream *s, *_s;
struct hdac_ext_stream *stream;
struct hdac_bus *bus = ebus_to_hbus(ebus);
- while (!list_empty(&bus->stream_list)) {
- s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
+ list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
stream = stream_to_hdac_ext_stream(s);
snd_hdac_ext_stream_decouple(ebus, stream, false);
list_del(&s->list);
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
+#include <sound/hda_register.h>
static struct i915_audio_component *hdac_acomp;
}
EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+ ((pci)->device == 0x0c0c) || \
+ ((pci)->device == 0x0d0c) || \
+ ((pci)->device == 0x160c))
+
/**
- * snd_hdac_get_display_clk - Get CDCLK in kHz
+ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
* @bus: HDA core bus
*
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
+ * The Intel HSW/BDW display HDA controller is in the GPU. Both its power
+ * and its link BCLK depend on the GPU. Two Extended Mode registers, EM4
+ * (M value) and EM5 (N value), are used to convert CDCLK (Core Display
+ * Clock) to the 24 MHz BCLK:
+ * BCLK = CDCLK * M / N
+ * The values will be lost when the display power well is disabled and need to
+ * be restored to avoid abnormal playback speed.
*
- * This function queries CDCLK value in kHz from the graphics driver and
- * returns the value. A negative code is returned in error.
+ * Call this function at initialization, when changing the power well, and
+ * from the ELD notifier on hotplug.
*/
-int snd_hdac_get_display_clk(struct hdac_bus *bus)
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
struct i915_audio_component *acomp = bus->audio_component;
+ struct pci_dev *pci = to_pci_dev(bus->dev);
+ int cdclk_freq;
+ unsigned int bclk_m, bclk_n;
+
+ if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
+ return; /* only for i915 binding */
+ if (!CONTROLLER_IN_GPU(pci))
+ return; /* only HSW/BDW */
+
+ cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
+ switch (cdclk_freq) {
+ case 337500:
+ bclk_m = 16;
+ bclk_n = 225;
+ break;
+
+ case 450000:
+ default: /* default CDCLK 450MHz */
+ bclk_m = 4;
+ bclk_n = 75;
+ break;
+
+ case 540000:
+ bclk_m = 4;
+ bclk_n = 90;
+ break;
+
+ case 675000:
+ bclk_m = 8;
+ bclk_n = 225;
+ break;
+ }
- if (!acomp || !acomp->ops)
- return -ENODEV;
-
- return acomp->ops->get_cdclk_freq(acomp->dev);
+ snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
+ snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
}
-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
+EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
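A quick sanity check on the M/N table above: every supported CDCLK multiplied by M/N yields the fixed 24 MHz BCLK.

/*  CDCLK (kHz)    M      N     CDCLK * M / N
 *  337500        16    225     24000 kHz
 *  450000         4     75     24000 kHz
 *  540000         4     90     24000 kHz
 *  675000         8    225     24000 kHz
 */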
/* There is a fixed mapping between audio pin node and display port
* on current Intel platforms:
#define azx_del_card_list(chip) /* NOP */
#endif /* CONFIG_PM */
-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
- * BCLK = CDCLK * M / N
- * The values will be lost when the display power well is disabled and need to
- * be restored to avoid abnormal playback speed.
- */
-static void haswell_set_bclk(struct hda_intel *hda)
-{
- struct azx *chip = &hda->chip;
- int cdclk_freq;
- unsigned int bclk_m, bclk_n;
-
- if (!hda->need_i915_power)
- return;
-
- cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
- switch (cdclk_freq) {
- case 337500:
- bclk_m = 16;
- bclk_n = 225;
- break;
-
- case 450000:
- default: /* default CDCLK 450MHz */
- bclk_m = 4;
- bclk_n = 75;
- break;
-
- case 540000:
- bclk_m = 4;
- bclk_n = 90;
- break;
-
- case 675000:
- bclk_m = 8;
- bclk_n = 225;
- break;
- }
-
- azx_writew(chip, HSW_EM4, bclk_m);
- azx_writew(chip, HSW_EM5, bclk_n);
-}
-
#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
/*
* power management
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
&& hda->need_i915_power) {
snd_hdac_display_power(azx_bus(chip), true);
- haswell_set_bclk(hda);
+ snd_hdac_i915_set_bclk(azx_bus(chip));
}
if (chip->msi)
if (pci_enable_msi(pci) < 0)
bus = azx_bus(chip);
if (hda->need_i915_power) {
snd_hdac_display_power(bus, true);
- haswell_set_bclk(hda);
+ snd_hdac_i915_set_bclk(bus);
} else {
/* toggle codec wakeup bit for STATESTS read */
snd_hdac_set_codec_wakeup(bus, true);
/* initialize chip */
azx_init_pci(chip);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- struct hda_intel *hda;
-
- hda = container_of(chip, struct hda_intel, chip);
- haswell_set_bclk(hda);
- }
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+ snd_hdac_i915_set_bclk(bus);
hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
if (atomic_read(&(codec)->core.in_pm))
return;
+ snd_hdac_i915_set_bclk(&codec->bus->core);
check_presence_and_report(codec, pin_nid);
}
SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
config SND_SOC_RT5616
tristate "Realtek RT5616 CODEC"
+ depends on I2C
config SND_SOC_RT5631
tristate "Realtek ALC5631/RT5631 CODEC"
}
EXPORT_SYMBOL_GPL(arizona_init_spk);
+int arizona_free_spk(struct snd_soc_codec *codec)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
+ arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_free_spk);
+
static const struct snd_soc_dapm_route arizona_mono_routes[] = {
{ "OUT1R", NULL, "OUT1L" },
{ "OUT2R", NULL, "OUT2L" },
extern int arizona_init_gpio(struct snd_soc_codec *codec);
extern int arizona_init_mono(struct snd_soc_codec *codec);
+extern int arizona_free_spk(struct snd_soc_codec *codec);
+
extern int arizona_init_dai(struct arizona_priv *priv, int dai);
int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
pdata->sdout_share = val;
- of_property_read_u32(np, "cirrus,boost-manager", &val);
+ if (of_property_read_u32(np, "cirrus,boost-manager", &val))
+ val = -1u;
+
switch (val) {
case CS35L32_BOOST_MGR_AUTO:
case CS35L32_BOOST_MGR_AUTO_AUDIO:
case CS35L32_BOOST_MGR_FIXED:
pdata->boost_mng = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,boost-manager DT value %d\n", val);
pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
}
- of_property_read_u32(np, "cirrus,sdout-datacfg", &val);
+ if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
+ val = -1u;
switch (val) {
case CS35L32_DATA_CFG_LR_VP:
case CS35L32_DATA_CFG_LR_STAT:
case CS35L32_DATA_CFG_LR_VPSTAT:
pdata->sdout_datacfg = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,sdout-datacfg DT value %d\n", val);
pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
}
- of_property_read_u32(np, "cirrus,battery-threshold", &val);
+ if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
+ val = -1u;
switch (val) {
case CS35L32_BATT_THRESH_3_1V:
case CS35L32_BATT_THRESH_3_2V:
case CS35L32_BATT_THRESH_3_4V:
pdata->batt_thresh = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,battery-threshold DT value %d\n", val);
pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
}
- of_property_read_u32(np, "cirrus,battery-recovery", &val);
+ if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
+ val = -1u;
switch (val) {
case CS35L32_BATT_RECOV_3_1V:
case CS35L32_BATT_RECOV_3_2V:
case CS35L32_BATT_RECOV_3_6V:
pdata->batt_recov = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,battery-recovery DT value %d\n", val);
priv->core.arizona->dapm = NULL;
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+ arizona_free_spk(codec);
+
return 0;
}
}
#ifdef CONFIG_PM
-static int hdmi_codec_resume(struct snd_soc_codec *codec)
+static int hdmi_codec_prepare(struct device *dev)
{
- struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
+ struct hdac_ext_device *edev = to_hda_ext_device(dev);
+ struct hdac_device *hdac = &edev->hdac;
+
+ pm_runtime_get_sync(&edev->hdac.dev);
+
+ /*
+ * Power down the afg.
+ * codec_read is preferred over codec_write to set the power state:
+ * this way the verb to set the power state is sent and the response
+ * is received, so setting the power state is ensured without using a
+ * loop to read the state back.
+ */
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D3);
+
+ return 0;
+}
+
+static void hdmi_codec_complete(struct device *dev)
+{
+ struct hdac_ext_device *edev = to_hda_ext_device(dev);
struct hdac_hdmi_priv *hdmi = edev->private_data;
struct hdac_hdmi_pin *pin;
struct hdac_device *hdac = &edev->hdac;
- struct hdac_bus *bus = hdac->bus;
- int err;
- unsigned long timeout;
-
- hdac_hdmi_skl_enable_all_pins(&edev->hdac);
- hdac_hdmi_skl_enable_dp12(&edev->hdac);
/* Power up afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) {
-
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D0);
- /* Wait till power state is set to D0 */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
- && time_before(jiffies, timeout)) {
- msleep(50);
- }
- }
+ hdac_hdmi_skl_enable_all_pins(&edev->hdac);
+ hdac_hdmi_skl_enable_dp12(&edev->hdac);
/*
* As the ELD notify callback request is not entertained while the
list_for_each_entry(pin, &hdmi->pin_list, head)
hdac_hdmi_present_sense(pin, 1);
- /*
- * Codec power is turned ON during controller resume.
- * Turn it OFF here
- */
- err = snd_hdac_display_power(bus, false);
- if (err < 0) {
- dev_err(bus->dev,
- "Cannot turn OFF display power on i915, err: %d\n",
- err);
- return err;
- }
-
- return 0;
+ pm_runtime_put_sync(&edev->hdac.dev);
}
#else
-#define hdmi_codec_resume NULL
+#define hdmi_codec_prepare NULL
+#define hdmi_codec_complete NULL
#endif
static struct snd_soc_codec_driver hdmi_hda_codec = {
.probe = hdmi_codec_probe,
.remove = hdmi_codec_remove,
- .resume = hdmi_codec_resume,
.idle_bias_off = true,
};
struct hdac_ext_device *edev = to_hda_ext_device(dev);
struct hdac_device *hdac = &edev->hdac;
struct hdac_bus *bus = hdac->bus;
- unsigned long timeout;
int err;
dev_dbg(dev, "Enter: %s\n", __func__);
if (!bus)
return 0;
- /* Power down afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) {
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-
- /* Wait till power state is set to D3 */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)
- && time_before(jiffies, timeout)) {
-
- msleep(50);
- }
- }
-
+ /*
+ * Power down the afg.
+ * codec_read is preferred over codec_write to set the power state:
+ * this way the verb to set the power state is sent and the response
+ * is received, so setting the power state is ensured without using a
+ * loop to read the state back.
+ */
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D3);
err = snd_hdac_display_power(bus, false);
if (err < 0) {
dev_err(bus->dev, "Cannot turn on display power on i915\n");
hdac_hdmi_skl_enable_dp12(&edev->hdac);
/* Power up afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0))
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D0);
return 0;
}
static const struct dev_pm_ops hdac_hdmi_pm = {
SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
+ .prepare = hdmi_codec_prepare,
+ .complete = hdmi_codec_complete,
};
static const struct hda_device_id hdmi_list[] = {
SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
0),
- /* ADC for button press detection */
- SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL,
- NAU8825_SAR_ADC_EN_SFT, 0),
+ /* ADC for button press detection. A dapm supply widget is used to
+ * prevent dapm_power_widgets from keeping the codec at SND_SOC_BIAS_ON
+ * during suspend.
+ */
+ SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
+ NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
static void nau8825_restart_jack_detection(struct regmap *regmap)
{
+ /* Chip needs one FSCLK cycle in order to generate interrupts,
+ * as we cannot guarantee one will be provided by the system. Turning
+ * master mode on then off enables us to generate that FSCLK cycle
+ * with a minimum of contention on the clock bus.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
+
/* this will restart the entire jack detection process including MIC/GND
* switching and create interrupts. We have to go from 0 to 1 and back
* to 0 to restart.
struct regmap *regmap = nau8825->regmap;
int active_irq, clear_irq = 0, event = 0, event_mask = 0;
- regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq);
+ if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
+ dev_err(nau8825->dev, "failed to read irq status\n");
+ return IRQ_NONE;
+ }
if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
NAU8825_JACK_EJECTION_DETECTED) {
return ret;
}
}
-
- ret = regcache_sync(nau8825->regmap);
- if (ret) {
- dev_err(codec->dev,
- "Failed to sync cache: %d\n", ret);
- return ret;
- }
}
-
break;
case SND_SOC_BIAS_OFF:
if (nau8825->mclk_freq)
clk_disable_unprepare(nau8825->mclk);
-
- regcache_mark_dirty(nau8825->regmap);
break;
}
return 0;
}
+#ifdef CONFIG_PM
+static int nau8825_suspend(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ disable_irq(nau8825->irq);
+ regcache_cache_only(nau8825->regmap, true);
+ regcache_mark_dirty(nau8825->regmap);
+
+ return 0;
+}
+
+static int nau8825_resume(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ /* The chip may lose power and reset in S3. regcache_sync restores
+ * register values including configurations for sysclk, irq, and
+ * jack/button detection.
+ */
+ regcache_cache_only(nau8825->regmap, false);
+ regcache_sync(nau8825->regmap);
+
+ /* Check the jack plug status directly. If the headset is unplugged
+ * during S3 when the chip has no power, there will be no jack
+ * detection irq even after the nau8825_restart_jack_detection below,
+ * because the chip just thinks no headset has ever been plugged in.
+ */
+ if (!nau8825_is_jack_inserted(nau8825->regmap)) {
+ nau8825_eject_jack(nau8825);
+ snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
+ }
+
+ enable_irq(nau8825->irq);
+
+ /* Run jack detection to check the type (OMTP or CTIA) of the headset
+ * if there is one. This handles the case where a different type of
+ * headset is plugged in during S3. This triggers an IRQ iff a headset
+ * is already plugged in.
+ */
+ nau8825_restart_jack_detection(nau8825->regmap);
+
+ return 0;
+}
+#else
+#define nau8825_suspend NULL
+#define nau8825_resume NULL
+#endif
+
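The pair above is the standard regmap cache idiom for parts that can lose power in S3; condensed below, with the regcache_* calls being the real regmap API and the driver names taken from this diff:

/* suspend: stop the IRQ, stop touching hardware, mark the cache stale */
disable_irq(nau8825->irq);
regcache_cache_only(nau8825->regmap, true);	/* route accesses to the cache */
regcache_mark_dirty(nau8825->regmap);		/* force a full sync on resume */

/* resume: the chip may have reset, so replay every cached register */
regcache_cache_only(nau8825->regmap, false);
regcache_sync(nau8825->regmap);			/* rewrites all dirty registers */
enable_irq(nau8825->irq);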
static struct snd_soc_codec_driver nau8825_codec_driver = {
.probe = nau8825_codec_probe,
.set_sysclk = nau8825_set_sysclk,
.set_pll = nau8825_set_pll,
.set_bias_level = nau8825_set_bias_level,
.suspend_bias_off = true,
+ .suspend = nau8825_suspend,
+ .resume = nau8825_resume,
.controls = nau8825_controls,
.num_controls = ARRAY_SIZE(nau8825_controls),
regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
- /* Chip needs one FSCLK cycle in order to generate interrupts,
- * as we cannot guarantee one will be provided by the system. Turning
- * master mode on then off enables us to generate that FSCLK cycle
- * with a minimum of contention on the clock bus.
- */
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
-
ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"nau8825", nau8825);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int nau8825_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
- disable_irq(client->irq);
- regcache_cache_only(nau8825->regmap, true);
- regcache_mark_dirty(nau8825->regmap);
-
- return 0;
-}
-
-static int nau8825_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
- regcache_cache_only(nau8825->regmap, false);
- regcache_sync(nau8825->regmap);
- enable_irq(client->irq);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops nau8825_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
-};
-
static const struct i2c_device_id nau8825_i2c_ids[] = {
{ "nau8825", 0 },
{ }
.name = "nau8825",
.of_match_table = of_match_ptr(nau8825_of_ids),
.acpi_match_table = ACPI_PTR(nau8825_acpi_match),
- .pm = &nau8825_pm,
},
.probe = nau8825_i2c_probe,
.remove = nau8825_i2c_remove,
/* Interface data select */
static const char * const rt5640_data_select[] = {
- "Normal", "left copy to right", "right copy to left", "Swap"};
+ "Normal", "Swap", "left copy to right", "right copy to left"};
static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
#define RT5640_IF1_DAC_SEL_SFT 14
#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
+#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
+#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
#define RT5640_IF1_ADC_SEL_SFT 12
#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
+#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
+#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
#define RT5640_IF2_DAC_SEL_SFT 10
#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
+#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
+#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
#define RT5640_IF2_ADC_SEL_SFT 8
#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
+#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
+#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
#define RT5640_IF3_DAC_SEL_SFT 6
#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
+#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
+#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
#define RT5640_IF3_ADC_SEL_SFT 4
#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
+#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
+#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
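The control text order has to match the register encoding, which the reordered defines above restore; after this fix every IFx DAC/ADC select field decodes as:

/*  0x0  "Normal"
 *  0x1  "Swap"
 *  0x2  "left copy to right"
 *  0x3  "right copy to left"
 */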
/* REC Left Mixer Control 1 (0x3b) */
#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
static int wm5102_codec_remove(struct snd_soc_codec *codec)
{
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
priv->core.arizona->dapm = NULL;
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+ arizona_free_spk(codec);
+
return 0;
}
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+ arizona_free_spk(codec);
+
return 0;
}
break;
default:
dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
- dspclk = wm8962->sysclk;
+ dspclk = wm8962->sysclk_rate;
}
dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
priv->core.arizona->dapm = NULL;
+ arizona_free_spk(codec);
+
return 0;
}
priv->core.arizona->dapm = NULL;
+ arizona_free_spk(codec);
+
return 0;
}
tristate
select SND_HDA_EXT_CORE
select SND_SOC_TOPOLOGY
- select SND_HDA_I915
select SND_SOC_INTEL_SST
config SND_SOC_INTEL_SKL_RT286_MACH
return 0;
/* wait for pause to complete before we reset the stream */
- while (stream->running && tries--)
+ while (stream->running && --tries)
msleep(1);
if (!tries) {
dev_err(hsw->dev, "error: reset stream %d still running\n",
skl_ipc_int_disable(dsp);
free_irq(dsp->irq, dsp);
+ dsp->cl_dev.ops.cl_cleanup_controller(dsp);
+ skl_cldma_int_disable(dsp);
+ skl_ipc_op_int_disable(dsp);
+ skl_ipc_int_disable(dsp);
+
skl_dsp_disable_core(dsp);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);
{
int multiplier = 1;
struct skl_module_fmt *in_fmt, *out_fmt;
+ int in_rate, out_rate;
/* Since the fixup is applied to pin 0 only, ibs and obs need
if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
multiplier = 5;
- mcfg->ibs = (in_fmt->s_freq / 1000) *
- (mcfg->in_fmt->channels) *
- (mcfg->in_fmt->bit_depth >> 3) *
- multiplier;
-
- mcfg->obs = (mcfg->out_fmt->s_freq / 1000) *
- (mcfg->out_fmt->channels) *
- (mcfg->out_fmt->bit_depth >> 3) *
- multiplier;
+
+ if (in_fmt->s_freq % 1000)
+ in_rate = (in_fmt->s_freq / 1000) + 1;
+ else
+ in_rate = (in_fmt->s_freq / 1000);
+
+ mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
+ (mcfg->in_fmt->bit_depth >> 3) *
+ multiplier;
+
+ if (mcfg->out_fmt->s_freq % 1000)
+ out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
+ else
+ out_rate = (mcfg->out_fmt->s_freq / 1000);
+
+ mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
+ (mcfg->out_fmt->bit_depth >> 3) *
+ multiplier;
}
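The new in_rate/out_rate computation is a ceiling division of the sample rate to whole kHz, so the buffer sizes are never under-allocated for rates that do not divide evenly by 1000. A worked example under the formula above, assuming stereo 16-bit audio and multiplier 1:

/*  44100 Hz: 44100 % 1000 != 0, so in_rate = 44100 / 1000 + 1 = 45
 *  ibs = 45 * 2 channels * (16 >> 3 = 2 bytes) * 1 = 180 bytes
 *  48000 Hz: divides evenly, in_rate = 48, same result as before the fix
 */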
static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -ENOMEM;
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
mconfig->id.module_id, mconfig->guid);
if (ret < 0)
return ret;
+
+ mconfig->m_state = SKL_MODULE_LOADED;
}
/* update blob if blob is null for be with default value */
ret = skl_tplg_set_module_params(w, ctx);
if (ret < 0)
return ret;
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
}
return 0;
list_for_each_entry(w_module, &pipe->w_list, node) {
mconfig = w_module->w->priv;
- if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod)
+ if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
+ mconfig->m_state > SKL_MODULE_UNINIT)
return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
mconfig->id.module_id);
}
if (!skl_is_pipe_mem_avail(skl, mconfig))
return -ENOMEM;
+ skl_tplg_alloc_pipe_mem(skl, mconfig);
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
/*
* Create a list of modules for pipe.
* This list contains modules from source to sink
src_module = dst_module;
}
- skl_tplg_alloc_pipe_mem(skl, mconfig);
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
return 0;
}
enum skl_module_state {
SKL_MODULE_UNINIT = 0,
- SKL_MODULE_INIT_DONE = 1,
- SKL_MODULE_LOADED = 2,
- SKL_MODULE_UNLOADED = 3,
- SKL_MODULE_BIND_DONE = 4
+ SKL_MODULE_LOADED = 1,
+ SKL_MODULE_INIT_DONE = 2,
+ SKL_MODULE_BIND_DONE = 3,
+ SKL_MODULE_UNLOADED = 4,
};
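The enum is reordered because module state is compared numerically: the unload check earlier in this patch requires that a module which was loaded but never initialized still satisfies the comparison, so SKL_MODULE_LOADED must sort directly above SKL_MODULE_UNINIT. The check in question, quoted from this patch:

if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
    mconfig->m_state > SKL_MODULE_UNINIT)
	ctx->dsp->fw_ops.unload_mod(ctx->dsp, mconfig->id.module_id);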
struct skl_module_cfg {
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
+ int ret = 0;
/*
* Do not suspend if streams which are marked ignore suspend are
enable_irq_wake(bus->irq);
pci_save_state(pci);
pci_disable_device(pci);
- return 0;
} else {
- return _skl_suspend(ebus);
+ ret = _skl_suspend(ebus);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+ ret = snd_hdac_display_power(bus, false);
+ if (ret < 0)
+ dev_err(bus->dev,
+ "Cannot turn OFF display power on i915\n");
}
+
+ return ret;
}
static int skl_resume(struct device *dev)
if (bus->irq >= 0)
free_irq(bus->irq, (void *)bus);
- if (bus->remap_addr)
- iounmap(bus->remap_addr);
-
snd_hdac_bus_free_stream_pages(bus);
snd_hdac_stream_free_all(ebus);
snd_hdac_link_free_all(ebus);
+
+ if (bus->remap_addr)
+ iounmap(bus->remap_addr);
+
pci_release_regions(skl->pci);
pci_disable_device(skl->pci);
snd_hdac_ext_bus_exit(ebus);
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_i915_exit(&ebus->bus);
return 0;
}
if (skl->tplg)
release_firmware(skl->tplg);
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- snd_hdac_i915_exit(&ebus->bus);
-
if (pci_dev_run_wake(pci))
pm_runtime_get_noresume(&pci->dev);
- pci_dev_put(pci);
+
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(ebus);
+
skl_platform_unregister(&pci->dev);
skl_free_dsp(skl);
skl_machine_device_unregister(skl);
int count = 0;
char *state = "not set";
+ /* The card won't be set for the dummy component. As a spot fix
+ * we check for that case specifically here, but in future we
+ * will ensure that the dummy component looks like the others.
+ */
+ if (!cmpnt->card)
+ return 0;
+
list_for_each_entry(w, &cmpnt->card->widgets, list) {
if (w->dapm != dapm)
continue;