dma_addr_t
dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
void
dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
int
dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
void
dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
The four functions above are just like the counterpart functions
without the _attrs suffixes, except that they pass an optional
-struct dma_attrs*.
-
-struct dma_attrs encapsulates a set of "DMA attributes". For the
-definition of struct dma_attrs see linux/dma-attrs.h.
+set of DMA attributes.
The interpretation of DMA attributes is architecture-specific, and
each attribute should be documented in Documentation/DMA-attributes.txt.
-If struct dma_attrs* is NULL, the semantics of each of these
-functions is identical to those of the corresponding function
+If attrs is 0, the semantics of each of these functions
+are identical to those of the corresponding function
without the _attrs suffix. As a result dma_map_single_attrs()
can generally replace dma_map_single(), etc.
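For instance, when no special attributes are needed the two forms
behave identically (a minimal sketch; dev, cpu_addr, size and dir are
assumed to be set up elsewhere):

        dma_addr = dma_map_single_attrs(dev, cpu_addr, size, dir, 0);
        /* ...is equivalent to... */
        dma_addr = dma_map_single(dev, cpu_addr, size, dir);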
As an example of the use of the *_attrs functions, here's how
you could pass an attribute DMA_ATTR_FOO when mapping memory
for DMA:
-#include <linux/dma-attrs.h>
-/* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and
+#include <linux/dma-mapping.h>
+/* DMA_ATTR_FOO should be defined in linux/dma-mapping.h and
* documented in Documentation/DMA-attributes.txt */
...
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_FOO, &attrs);
+ unsigned long attr = 0;
+ attr |= DMA_ATTR_FOO;
....
- n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attr);
+ n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attr);
....
Architectures that care about DMA_ATTR_FOO would check for its
presence in their implementations of the mapping and allocation
functions, e.g.:
void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
....
- int foo = dma_get_attr(DMA_ATTR_FOO, attrs);
- ....
- if (foo)
+ if (attrs & DMA_ATTR_FOO)
/* twizzle the frobnozzle */
....
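Since the attributes are now plain bits in an unsigned long, several
of them can be combined in a single argument with bitwise OR, without
the old DEFINE_DMA_ATTRS()/dma_set_attr() boilerplate (DMA_ATTR_BAR is
hypothetical here, just like DMA_ATTR_FOO above):

        unsigned long attrs = DMA_ATTR_FOO | DMA_ATTR_BAR;

        n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, attrs);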
==============
This document describes the semantics of the DMA attributes that are
-defined in linux/dma-attrs.h.
+defined in linux/dma-mapping.h.
DMA_ATTR_WRITE_BARRIER
----------------------
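A driver that wants a mapping to act as a write barrier just passes
the attribute bit at map time; a sketch, where status_buf is a
hypothetical buffer the device writes a completion flag into:

        dma_addr = dma_map_single_attrs(dev, status_buf,
                                        sizeof(*status_buf),
                                        DMA_FROM_DEVICE,
                                        DMA_ATTR_WRITE_BARRIER);

The ia64/sn hunks later in this patch show the architecture side: when
the bit is set, sn_dma_map_page() and sn_dma_map_sg() map the buffer
through the provider's dma_map_consistent() path.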
ARM/SAMSUNG S5P SERIES 2D GRAPHICS ACCELERATION (G2D) SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
+M: Andrzej Hajda <a.hajda@samsung.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
M: Kyungmin Park <kyungmin.park@samsung.com>
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
M: Jeongtae Park <jtp.park@samsung.com>
+M: Andrzej Hajda <a.hajda@samsung.com>
L: linux-arm-kernel@lists.infradead.org
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/usb/pwc/*
PWM FAN DRIVER
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
+M: Lukasz Majewski <l.majewski@samsung.com>
L: linux-hwmon@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/hwmon/pwm-fan.txt
F: drivers/thermal/samsung/
SAMSUNG USB2 PHY DRIVER
-M: Kamil Debski <k.debski@samsung.com>
+M: Kamil Debski <kamil@wypas.org>
+M: Sylwester Nawrocki <s.nawrocki@samsung.com>
L: linux-kernel@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/phy/samsung-phy.txt
#ifndef _ALPHA_DMA_MAPPING_H
#define _ALPHA_DMA_MAPPING_H
-#include <linux/dma-attrs.h>
-
extern struct dma_map_ops *dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
static void *alpha_noop_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
static dma_addr_t alpha_pci_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
int dac_allowed;
static void alpha_pci_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags;
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
static void *alpha_pci_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
void *cpu_addr;
static void alpha_pci_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
static int alpha_pci_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
struct scatterlist *start, *end, *out;
static void alpha_pci_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = alpha_gendev_to_pci(dev);
unsigned long flags;
static void *arc_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned long order = get_order(size);
struct page *page;
* (vs. always going to memory - thus are faster)
*/
if ((is_isa_arcv2() && ioc_exists) ||
- dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ (attrs & DMA_ATTR_NON_CONSISTENT))
need_coh = 0;
/*
}
static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
struct page *page = virt_to_page(paddr);
int is_non_coh = 1;
- is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
+ is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
(is_isa_arcv2() && ioc_exists);
if (PageHighMem(page) || !is_non_coh)
static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
_dma_cache_sync(paddr, size, dir);
}
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
*/
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_addr;
int ret;
* should be)
*/
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct safe_buffer *buf;
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/memory.h>
* to be the device-viewed address.
*/
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs);
+ gfp_t gfp, unsigned long attrs);
/**
* arm_dma_free - free memory allocated by arm_dma_alloc
* during and after this call executing are illegal.
*/
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs);
+ dma_addr_t handle, unsigned long attrs);
/**
* arm_dma_mmap - map a coherent DMA allocation into user space
*/
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
+ unsigned long attrs);
/*
* This can be called during early boot to increase the size of the atomic
* The scatter list versions of the above methods.
*/
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
+ enum dma_data_direction, unsigned long attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
- enum dma_data_direction, struct dma_attrs *attrs);
+ enum dma_data_direction, unsigned long attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#endif /* __KERNEL__ */
#endif
#define _ASM_ARM_XEN_PAGE_COHERENT_H
#include <asm/page.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
void __xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir);
dma_addr_t handle, size_t size, enum dma_data_direction dir);
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
- void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
unsigned long page_pfn = page_to_xen_pfn(page);
unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
unsigned long pfn = PFN_DOWN(handle);
/*
#endif
#if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
- defined(CONFIG_GENERIC_BUG)
+ defined(CONFIG_GENERIC_BUG) || defined(CONFIG_JUMP_LABEL)
#define ARM_EXIT_KEEP(x) x
#define ARM_EXIT_DISCARD(x)
#else
*/
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(page, offset, size, dir);
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
* whatever the device wrote there.
*/
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
handle & ~PAGE_MASK, size, dir);
}
EXPORT_SYMBOL(arm_dma_ops);
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs);
+ dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs);
+ unsigned long attrs);
struct dma_map_ops arm_coherent_dma_ops = {
.alloc = arm_coherent_dma_alloc,
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}
-static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
+static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
- prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
- pgprot_writecombine(prot) :
- pgprot_dmacoherent(prot);
+ prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
+ pgprot_writecombine(prot) :
+ pgprot_dmacoherent(prot);
return prot;
}
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
gfp_t gfp, pgprot_t prot, bool is_coherent,
- struct dma_attrs *attrs, const void *caller)
+ unsigned long attrs, const void *caller)
{
u64 mask = get_coherent_dma_mask(dev);
struct page *page = NULL;
.gfp = gfp,
.prot = prot,
.caller = caller,
- .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+ .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
.coherent_flag = is_coherent ? COHERENT : NORMAL,
};
* virtual and bus address for that space.
*/
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
}
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
attrs, __builtin_return_address(0));
static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = -ENXIO;
#ifdef CONFIG_MMU
*/
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef CONFIG_MMU
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
* Free a buffer as defined by the above mapping.
*/
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs,
+ dma_addr_t handle, unsigned long attrs,
bool is_coherent)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
.size = PAGE_ALIGN(size),
.cpu_addr = cpu_addr,
.page = page,
- .want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs),
+ .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
};
buf = arm_dma_buffer_find(cpu_addr);
}
void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
int ret;
* here.
*/
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
* rules concerning calls here are the same as for dma_unmap_single().
*/
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
struct scatterlist *s;
static const int iommu_order_array[] = { 9, 8, 4, 0 };
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
- gfp_t gfp, struct dma_attrs *attrs,
+ gfp_t gfp, unsigned long attrs,
int coherent_flag)
{
struct page **pages;
if (!pages)
return NULL;
- if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
unsigned long order = get_order(size);
struct page *page;
}
/* Go straight to 4K chunks if caller says it's OK. */
- if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+ if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
order_idx = ARRAY_SIZE(iommu_order_array) - 1;
/*
}
static int __iommu_free_buffer(struct device *dev, struct page **pages,
- size_t size, struct dma_attrs *attrs)
+ size_t size, unsigned long attrs)
{
int count = size >> PAGE_SHIFT;
int i;
- if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
+ if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
dma_release_from_contiguous(dev, pages[0], count);
} else {
for (i = 0; i < count; i++)
return (struct page **)page;
}
-static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
+static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
{
struct vm_struct *area;
if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
return __atomic_get_pages(cpu_addr);
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return cpu_addr;
area = find_vm_area(cpu_addr);
}
static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs,
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
int coherent_flag)
{
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
if (*handle == DMA_ERROR_CODE)
goto err_buffer;
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
+ if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return pages;
addr = __iommu_alloc_remap(pages, size, gfp, prot,
}
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
}
static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
}
static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long uaddr = vma->vm_start;
unsigned long usize = vma->vm_end - vma->vm_start;
}
static int arm_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
static int arm_coherent_iommu_mmap_attrs(struct device *dev,
struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}
* Must not be called with IRQs disabled.
*/
void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs, int coherent_flag)
+ dma_addr_t handle, unsigned long attrs, int coherent_flag)
{
struct page **pages;
size = PAGE_ALIGN(size);
return;
}
- if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
+ if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0) {
dma_common_free_remap(cpu_addr, size,
VM_ARM_DMA_CONSISTENT | VM_USERMAP);
}
}
void arm_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
}
void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
}
static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
+ size_t size, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page **pages = __iommu_get_pages(cpu_addr, attrs);
*/
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
size_t size, dma_addr_t *handle,
- enum dma_data_direction dir, struct dma_attrs *attrs,
+ enum dma_data_direction dir, unsigned long attrs,
bool is_coherent)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
phys_addr_t phys = page_to_phys(sg_page(s));
unsigned int len = PAGE_ALIGN(s->offset + s->length);
- if (!is_coherent &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
prot = __dma_direction_to_prot(dir);
}
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs,
+ enum dma_data_direction dir, unsigned long attrs,
bool is_coherent)
{
struct scatterlist *s = sg, *dma = sg, *start = sg;
* obtained via sg_dma_{address,length}.
*/
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}
* sg_dma_{address,length}.
*/
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
- bool is_coherent)
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs, bool is_coherent)
{
struct scatterlist *s;
int i;
if (sg_dma_len(s))
__iommu_remove_mapping(dev, sg_dma_address(s),
sg_dma_len(s));
- if (!is_coherent &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
* rules concerning calls here are the same as for dma_unmap_single().
*/
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir,
+ unsigned long attrs)
{
__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}
* rules concerning calls here are the same as for dma_unmap_single().
*/
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}
*/
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t dma_addr;
*/
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_cpu_to_dev(page, offset, size, dir);
return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
* Coherent IOMMU aware version of arm_dma_unmap_page()
*/
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
* IOMMU aware version of arm_dma_unmap_page()
*/
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
dma_addr_t iova = handle & PAGE_MASK;
if (!iova)
return;
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__dma_page_dev_to_cpu(page, offset, size, dir);
iommu_unmap(mapping->domain, iova, len);
void __xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
if (is_device_dma_coherent(hwdev))
return;
- if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (is_device_dma_coherent(hwdev))
return;
- if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
return;
__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
static int swiotlb __read_mostly;
-static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
bool coherent)
{
- if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
return pgprot_writecombine(prot);
return prot;
}
static void *__dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (dev == NULL) {
WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
static void __dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool freed;
phys_addr_t paddr = dma_to_phys(dev, dma_handle);
static void *__dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page;
void *ptr, *coherent_ptr;
static void __dma_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dev_addr;
static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (!is_device_dma_coherent(dev))
__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i, ret;
static void __swiotlb_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static int __swiotlb_mmap(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = -ENXIO;
unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
static void *__dummy_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return NULL;
}
static void __dummy_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
static int __dummy_mmap(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return -ENXIO;
}
static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return DMA_ERROR_CODE;
}
static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return 0;
}
static void __dummy_unmap_sg(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
}
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
size_t iosize = size;
* Hence how dodgy the below logic looks...
*/
if (__in_atomic_pool(cpu_addr, size)) {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
__free_from_pool(cpu_addr, size);
} else if (is_vmalloc_addr(cpu_addr)){
struct vm_struct *area = find_vm_area(cpu_addr);
iommu_dma_free(dev, area->pages, iosize, &handle);
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
__free_pages(virt_to_page(cpu_addr), get_order(size));
}
}
static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vm_struct *area;
int ret;
static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
+ size_t size, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct vm_struct *area = find_vm_area(cpu_addr);
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
int prot = dma_direction_to_prot(dir, coherent);
dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
if (!iommu_dma_mapping_error(dev, dev_addr) &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_single_for_device(dev, dev_addr, size, dir);
return dev_addr;
static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
bool coherent = is_device_dma_coherent(dev);
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_device(dev, sgl, nelems, dir);
return iommu_dma_map_sg(dev, sgl, nelems,
static void __iommu_unmap_sg_attrs(struct device *dev,
struct scatterlist *sgl, int nelems,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
+ if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}
static void *avr32_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
struct page *page;
dma_addr_t phys;
return NULL;
phys = page_to_phys(page);
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ if (attrs & DMA_ATTR_WRITE_COMBINE) {
/* Now, map the page into P3 with write-combining turned on */
*handle = phys;
return __ioremap(phys, size, _PAGE_BUFFER);
}
static void avr32_dma_free(struct device *dev, size_t size,
- void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs)
+ void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
struct page *page;
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs)) {
+ if (attrs & DMA_ATTR_WRITE_COMBINE) {
iounmap(cpu_addr);
page = phys_to_page(handle);
static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
void *cpu_addr = page_address(page) + offset;
static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
}
static void *bfin_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
}
static void bfin_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
__free_dma_pages((unsigned long)vaddr, get_pages(size));
}
static int bfin_dma_map_sg(struct device *dev, struct scatterlist *sg_list,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static dma_addr_t bfin_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t handle = (dma_addr_t)(page_address(page) + offset);
extern void coherent_mem_init(u32 start, u32 size);
void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs);
+ gfp_t gfp, unsigned long attrs);
void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs);
+ dma_addr_t dma_handle, unsigned long attrs);
#endif /* _ASM_C6X_DMA_MAPPING_H */
static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t handle = virt_to_phys(page_address(page) + offset);
}
static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
- size_t size, enum dma_data_direction dir, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction dir, unsigned long attrs)
{
c6x_dma_sync(handle, size, dir);
}
static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
}
static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
* virtual and DMA address for that space.
*/
void *c6x_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
u32 paddr;
int order;
* Free DMA coherent memory as defined by the above mapping.
*/
void c6x_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
int order;
#include <asm/io.h>
static void *v32_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
}
static void v32_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
static inline dma_addr_t v32_dma_map_page(struct device *dev,
struct page *page, unsigned long offset, size_t size,
- enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
return page_to_phys(page) + offset;
}
static inline int v32_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
printk("Map sg\n");
return nents;
static LIST_HEAD(dma_alloc_list);
static void *frv_dma_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
struct dma_alloc_record *new;
struct list_head *this = &dma_alloc_list;
}
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct dma_alloc_record *rec;
unsigned long flags;
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
flush_dcache_page(page);
#include <asm/io.h>
static void *frv_dma_alloc(struct device *hwdev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
}
static void frv_dma_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
consistent_free(vaddr);
}
static int frv_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long dampr2;
void *vaddr;
static dma_addr_t frv_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
flush_dcache_page(page);
return (dma_addr_t) page_to_phys(page) + offset;
static void *dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
static void dma_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
static dma_addr_t map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return page_to_phys(page) + offset;
}
static int map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
#include <asm/io.h>
struct device;
static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
}
static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, unsigned long attrs)
{
gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}
static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
unsigned long poff, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ioc *ioc;
void *addr = page_address(page) + poff;
static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return sba_map_page(dev, virt_to_page(addr),
(unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
}
void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
sba_unmap_page(dev, iova, size, dir, attrs);
}
*/
static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
- gfp_t flags, struct dma_attrs *attrs)
+ gfp_t flags, unsigned long attrs)
{
struct ioc *ioc;
void *addr;
* device to map single to get an iova mapping.
*/
*dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
- size, 0, NULL);
+ size, 0, 0);
return addr;
}
* See Documentation/DMA-API-HOWTO.txt
*/
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
+ sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
/**
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
*/
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
*/
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef ASSERT_PDIR_SANITY
struct ioc *ioc;
struct task_struct;
struct pci_dev;
struct msi_desc;
-struct dma_attrs;
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (dev->coherent_dma_mask != DMA_BIT_MASK(64))
gfp |= GFP_DMA;
static void ia64_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
*/
static void *sn_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *cpuaddr;
unsigned long phys_addr;
* any associated IOMMU mappings.
*/
static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *cpu_addr = page_address(page) + offset;
dma_addr_t dma_addr;
unsigned long phys_addr;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
- int dmabarr;
-
- dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
BUG_ON(!dev_is_pci(dev));
phys_addr = __pa(cpu_addr);
- if (dmabarr)
+ if (attrs & DMA_ATTR_WRITE_BARRIER)
dma_addr = provider->dma_map_consistent(pdev, phys_addr,
size, SN_DMA_ADDR_PHYS);
else
*/
static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
*/
static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct pci_dev *pdev = to_pci_dev(dev);
*/
static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,
int nhwentries, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long phys_addr;
struct scatterlist *saved_sg = sgl, *sg;
struct pci_dev *pdev = to_pci_dev(dev);
struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
int i;
- int dmabarr;
-
- dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
BUG_ON(!dev_is_pci(dev));
for_each_sg(sgl, sg, nhwentries, i) {
dma_addr_t dma_addr;
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
- if (dmabarr)
+ if (attrs & DMA_ATTR_WRITE_BARRIER)
dma_addr = provider->dma_map_consistent(pdev,
phys_addr,
sg->length,
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
- gfp_t flag, struct dma_attrs *attrs)
+ gfp_t flag, unsigned long attrs)
{
struct page *page, **map;
pgprot_t pgprot;
}
static void m68k_dma_free(struct device *dev, size_t size, void *addr,
- dma_addr_t handle, struct dma_attrs *attrs)
+ dma_addr_t handle, unsigned long attrs)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
vfree(addr);
#include <asm/cacheflush.h>
static void *m68k_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
/* ignore region specifiers */
}
static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
}
static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t handle = page_to_phys(page) + offset;
}
static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct scatterlist *sg;
* virtual and bus address for that space.
*/
static void *metag_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
struct page *page;
struct metag_vm_region *c;
* free a page as defined by the above mapping.
*/
static void metag_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct metag_vm_region *c;
unsigned long flags, addr;
static int metag_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags, user_size, kern_size;
struct metag_vm_region *c;
int ret = -ENXIO;
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
static dma_addr_t metag_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
dma_sync_for_device((void *)(page_to_phys(page) + offset), size,
direction);
static void metag_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
static int metag_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static void metag_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/cacheflush.h>
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
unsigned long offset,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
__dma_sync(page_to_phys(page) + offset, size, direction);
return page_to_phys(page) + offset;
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
/* There is no need to do cache cleanup
*
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef CONFIG_MMU
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
direction, attrs);
}
static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
mb();
}
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
}
static void octeon_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
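From here on the patch is a different mechanical conversion: MIPS code
moves from the old config_enabled() helper to IS_ENABLED(). The two
differ only for tristate symbols: config_enabled(CONFIG_FOO) is true
only for =y, while IS_ENABLED(CONFIG_FOO) is also true for =m, so for
the boolean options touched below the replacement is
behaviour-preserving. A minimal sketch (CONFIG_EXAMPLE_FEATURE and
setup_example_feature() are hypothetical):

        #include <linux/kconfig.h>

        if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE)) /* true for =y or =m */
                setup_example_feature();        /* hypothetical helper */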
if (mips_cm_revision() >= CM_REV_CM3)
return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
- if (config_enabled(CONFIG_SMP))
+ if (IS_ENABLED(CONFIG_SMP))
return smp_num_siblings;
return 1;
* it better already be global)
*/
if (pte_none(*buddy)) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
}
htw_stop();
/* Preserve global status for the pair */
- if (config_enabled(CONFIG_XPA)) {
+ if (IS_ENABLED(CONFIG_XPA)) {
if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
null.pte_high = _PAGE_GLOBAL;
} else {
static inline pte_t pte_wrprotect(pte_t pte)
{
pte.pte_low &= ~_PAGE_WRITE;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
static inline pte_t pte_mkclean(pte_t pte)
{
pte.pte_low &= ~_PAGE_MODIFIED;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
static inline pte_t pte_mkold(pte_t pte)
{
pte.pte_low &= ~_PAGE_ACCESSED;
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low &= ~_PAGE_SILENT_READ;
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
{
pte.pte_low |= _PAGE_WRITE;
if (pte.pte_low & _PAGE_MODIFIED) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
{
pte.pte_low |= _PAGE_MODIFIED;
if (pte.pte_low & _PAGE_WRITE) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
}
{
pte.pte_low |= _PAGE_ACCESSED;
if (!(pte.pte_low & _PAGE_NO_READ)) {
- if (!config_enabled(CONFIG_XPA))
+ if (!IS_ENABLED(CONFIG_XPA))
pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
}
0, /* null terminated */
};
- if (config_enabled(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
+ if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
return syscalls_O32;
- if (config_enabled(CONFIG_MIPS32_N32))
+ if (IS_ENABLED(CONFIG_MIPS32_N32))
return syscalls_N32;
BUG();
((ka)->sa.sa_flags & SA_SIGINFO))
#else
#define sig_uses_siginfo(ka, abi) \
- (config_enabled(CONFIG_64BIT) ? 1 : \
- (config_enabled(CONFIG_TRAD_SIGNALS) ? \
+ (IS_ENABLED(CONFIG_64BIT) ? 1 : \
+ (IS_ENABLED(CONFIG_TRAD_SIGNALS) ? \
((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
#endif
{
int ret;
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
- if ((config_enabled(CONFIG_32BIT) ||
+ if ((IS_ENABLED(CONFIG_32BIT) ||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
(regs->regs[2] == __NR_syscall))
i++;
*/
static inline bool eva_kernel_access(void)
{
- if (!config_enabled(CONFIG_EVA))
+ if (!IS_ENABLED(CONFIG_EVA))
return false;
return segment_eq(get_fs(), get_ds());
const char *get_system_type(void)
{
- if (config_enabled(CONFIG_MACH_JZ4780))
+ if (IS_ENABLED(CONFIG_MACH_JZ4780))
return "JZ4780";
return "JZ4740";
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
+int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
static inline void check_daddiu(void)
{
void __init check_bugs64_early(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
check_mult_sh();
check_daddiu();
}
void __init check_bugs64(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6))
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
check_daddi();
}
return -ELIBBAD;
}
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
fp_abi = state->fp_abi;
* not be worried about N32/N64 binaries.
*/
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return;
switch (state->overall_fp_mode) {
mips_cm_probe_l2sync();
/* determine register width for this CM */
- mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+ mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cm_core_lock, cpu));
(s32)MIPSInst_SIMM(ir);
return 0;
case daddiu_op:
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
break;
if (MIPSInst_RT(ir))
(u32)regs->regs[MIPSInst_RT(ir)]);
return 0;
case dsll_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
MIPSInst_FD(ir));
return 0;
case dsrl_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
MIPSInst_FD(ir));
return 0;
case daddu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
(u64)regs->regs[MIPSInst_RT(ir)];
return 0;
case dsubu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
s64 res;
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
u64 res;
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
{
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
{
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
break;
case ldl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
break;
case ldr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
break;
case sdl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
break;
case sdr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
break;
case lld_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
break;
case scd_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
}
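The emulation hunks above all follow one shape: each 64-bit-only instruction is
refused up front on a 32-bit kernel, before any register state is touched,
typically by raising SIGILL. A minimal sketch of the guard (dfoo_op is a
hypothetical opcode):

	case dfoo_op:
		if (IS_ENABLED(CONFIG_32BIT)) {
			err = SIGILL;	/* doubleword op on a 32-bit kernel */
			break;
		}
		....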
/* Setup the VPE to run mips_cps_pm_restore when started again */
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
return -EINVAL;
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
goto out_err;
* should already have been done when handling scalar FP
* context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
err = __put_user(read_msa_csr(), &msa->csr);
err |= _save_msa_all_upper(&msa->wr);
unsigned int csr;
int i, err;
- if (!config_enabled(CONFIG_CPU_HAS_MSA))
+ if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
return SIGSYS;
if (size != sizeof(*msa))
* scalar FP context, so FPU & MSA should have already been
* disabled whilst handling scalar FP context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
write_msa_csr(csr);
err |= _restore_msa_all_upper(&msa->wr);
* EVA does not have userland equivalents of ldc1 or sdc1, so
* save to the kernel FP context & copy that to userland below.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(1);
while (1) {
* disable the FPU here such that the code below simply copies to
* the kernel FP context.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(0);
while (1) {
if (threads_disabled)
return 1;
- if ((!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
- && (!config_enabled(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
+ if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
return 1;
mips_cm_lock_other(core, 0);
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHW(addr, value, res);
else
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadW(addr, value, res);
else
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHWU(addr, value, res);
else
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreHW(addr, value, res);
else
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreW(addr, value, res);
else
#include <dma-coherence.h>
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
}
static void loongson_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
dir, attrs);
static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
+ int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
mb();
return r;
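Throughout the conversion, call sites that carried no attributes change from
passing a NULL struct dma_attrs pointer to passing 0, the empty bitmask, as in
the swiotlb forwarding just above. A caller-side sketch using the _attrs entry
points (dev, buf and len are placeholder names):

#include <linux/dma-mapping.h>

	dma_addr_t handle;

	/* 0 means "no attributes"; behaviour matches dma_map_single() */
	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0);
	....
	dma_unmap_single_attrs(dev, handle, len, DMA_TO_DEVICE, 0);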
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
- if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32))
+ if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
return 1;
- else if (config_enabled(CONFIG_32BIT) &&
- !config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ else if (IS_ENABLED(CONFIG_32BIT) &&
+ !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
return !test_thread_flag(TIF_32BIT_FPREGS);
}
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
struct page *page = NULL;
* XXX: seems like the coherent and non-coherent implementations could
* be consolidated.
*/
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
gfp = massage_gfp_flags(dev, gfp);
}
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) vaddr;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if (attrs & DMA_ATTR_NON_CONSISTENT) {
mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
return;
}
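The MIPS coherent-allocator hunks above dispatch on DMA_ATTR_NON_CONSISTENT in
both directions, so the allocate and free calls must agree on the attribute
bits. A hedged caller sketch, assuming dev, size and a dma_addr_t handle are
set up elsewhere:

	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;
	void *buf;

	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, attrs);
	....
	/* same attrs, so the non-consistent teardown path is taken */
	dma_free_attrs(dev, size, buf, handle, attrs);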
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
pfn = page_to_pfn(virt_to_page((void *)addr));
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
}
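mips_dma_mmap() reads the same bitmask: DMA_ATTR_WRITE_COMBINE selects a
write-combined user mapping, anything else falls back to uncached. A
hypothetical driver .mmap handler exporting a buffer obtained from
dma_alloc_attrs() could simply forward the attribute:

	return dma_mmap_attrs(dev, vma, cpu_addr, handle, size,
			      DMA_ATTR_WRITE_COMBINE);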
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction direction, unsigned long attrs)
{
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_addr),
}
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int i;
struct scatterlist *sg;
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (!plat_device_is_coherent(dev))
__dma_sync(page, offset, size, direction);
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
pte_off_odd += offsetof(pte_t, pte_high);
#endif
- if (config_enabled(CONFIG_XPA)) {
+ if (IS_ENABLED(CONFIG_XPA)) {
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
unsigned int swmode = mode & ~hwmode;
- if (config_enabled(CONFIG_XPA) && !cpu_has_64bits) {
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
uasm_i_lui(p, scratch, swmode >> 16);
uasm_i_or(p, pte, pte, scratch);
BUG_ON(swmode & 0xffff);
pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
/* Set pointer size to size of directory pointers */
- if (config_enabled(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
pwsize |= MIPS_PWSIZE_PS_MASK;
/* PTEs may be multiple pointers long (e.g. with XPA) */
pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
* the pwctl fields.
*/
config = 1 << MIPS_PWCTL_PWEN_SHIFT;
- if (config_enabled(CONFIG_64BIT))
+ if (IS_ENABLED(CONFIG_64BIT))
config |= MIPS_PWCTL_XU_MASK;
write_c0_pwctl(config);
pr_info("Hardware Page Table Walker enabled\n");
*/
static int run_once = 0;
- if (config_enabled(CONFIG_XPA) && !cpu_has_rixi)
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
panic("Kernels supporting XPA currently require CPUs with RIXI");
output_pgtable_bits_defines();
entries = 1;
mem_array[0] = cpu_to_be32(PHYS_OFFSET);
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
/*
* The current Malta EVA configuration is "special" in that it
* always makes use of addresses in the upper half of the 32 bit
physical_memsize = 32 << 20;
}
- if (config_enabled(CONFIG_CPU_BIG_ENDIAN)) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
/*
* SOC-it swaps, or perhaps doesn't swap, when DMA'ing
* the last word of physical memory.
void __init fw_meminit(void)
{
- bool eva = config_enabled(CONFIG_EVA);
+ bool eva = IS_ENABLED(CONFIG_EVA);
free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
}
fdt = malta_dt_shim(fdt);
__dt_setup_arch(fdt);
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
/* EVA has already been configured in mach-malta/kernel-init.h */
pr_info("Enhanced Virtual Addressing (EVA) activated\n");
static inline void emit_load_func(unsigned int reg, ptr imm,
struct jit_ctx *ctx)
{
- if (config_enabled(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT)) {
/* At this point imm is always 64-bit */
emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
static inline u16 align_sp(unsigned int num)
{
/* Double word alignment for 32-bit, quadword for 64-bit */
- unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;
+ unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
num = (num + (align - 1)) & -align;
return num;
}
static char *nlm_swiotlb;
static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
}
static void nlm_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
static unsigned long pci_sram_allocated = 0xbc000000;
static void *mn10300_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
unsigned long addr;
void *ret;
}
static void mn10300_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) vaddr & ~0x20000000;
static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static dma_addr_t mn10300_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
return page_to_bus(page) + offset;
}
}
static void *nios2_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
}
static void nios2_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) CAC_ADDR((unsigned long) vaddr);
static int nios2_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
static dma_addr_t nios2_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *addr = page_address(page) + offset;
static void nios2_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
__dma_sync_for_cpu(phys_to_virt(dma_address), size, direction);
}
static void nios2_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *addr;
int i;
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
-#include <linux/dma-attrs.h>
#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
static void *
or1k_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long va;
void *page;
va = (unsigned long)page;
- if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
/*
* We need to iterate through the pages, clearing the dcache for
* them and setting the cache-inhibit bit.
static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long va = (unsigned long)vaddr;
struct mm_walk walk = {
.mm = &init_mm
};
- if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
/* walk_page_range shouldn't be able to fail here */
WARN_ON(walk_page_range(va, va + size, &walk));
}
or1k_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long cl;
dma_addr_t addr = page_to_phys(page) + offset;
static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
/* Nothing special to do here... */
}
static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
- s->length, dir, NULL);
+ s->length, dir, 0);
}
return nents;
static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
- or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
+ or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
}
}
__initcall(pcxl_dma_init);
static void *pa11_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
unsigned long vaddr;
unsigned long paddr;
}
static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
int order;
static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
void *addr = page_address(page) + offset;
BUG_ON(direction == DMA_NONE);
static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(direction == DMA_NONE);
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
};
static void *pcx_dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
void *addr;
- if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
return NULL;
addr = (void *)__get_free_pages(flag, get_order(size));
}
static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t iova, struct dma_attrs *attrs)
+ dma_addr_t iova, unsigned long attrs)
{
free_pages((unsigned long)vaddr, get_order(size));
return;
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
/* Some dma direct funcs must be visible for use in other dma_ops */
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void __dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern int dma_direct_mmap_coherent(struct device *dev,
struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle,
- size_t size, struct dma_attrs *attrs);
+ size_t size, unsigned long attrs);
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
long index, long npages,
unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#ifdef CONFIG_IOMMU_API
/*
* Exchanges existing TCE with new TCE plus direction bits;
struct scatterlist *sglist, int nelems,
unsigned long mask,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
struct scatterlist *sglist,
int nelems,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
size_t size, dma_addr_t *dma_handle,
struct page *page, unsigned long offset,
size_t size, unsigned long mask,
enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
#include <linux/types.h>
#include <asm/feature-fixups.h>
+#include <asm/asm-compat.h>
#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
#define JUMP_LABEL_NOP_SIZE 4
*/
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
dma_handle, dev->coherent_dma_mask, flag,
static void dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
}
static dma_addr_t dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
size, device_to_mask(dev), direction, attrs);
static void dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
iommu_unmap_page(get_iommu_table_base(dev), dma_handle, size, direction,
attrs);
static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return ppc_iommu_map_sg(dev, get_iommu_table_base(dev), sglist, nelems,
device_to_mask(dev), direction, attrs);
static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
ppc_iommu_unmap_sg(get_iommu_table_base(dev), sglist, nelems,
direction, attrs);
void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
void __dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
__dma_free_coherent(size, vaddr);
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *iommu;
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *iommu;
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long pfn;
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
unsigned long offset,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(dir == DMA_NONE);
__dma_sync_page(page, offset, size, dir);
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
}
size_t size,
dma_addr_t *dma_handle,
gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *mem;
static void ibmebus_free_coherent(struct device *dev,
size_t size, void *vaddr,
dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
kfree(vaddr);
}
unsigned long offset,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return (dma_addr_t)(page_address(page) + offset);
}
dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return;
}
static int ibmebus_map_sg(struct device *dev,
struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static void ibmebus_unmap_sg(struct device *dev,
struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return;
}
void *page, unsigned int npages,
enum dma_data_direction direction,
unsigned long mask, unsigned int align_order,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long entry;
dma_addr_t ret = DMA_ERROR_CODE;
int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
struct scatterlist *sglist, int nelems,
unsigned long mask, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_next = 0, dma_addr;
struct scatterlist *s, *outs, *segstart;
void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
struct page *page, unsigned long offset, size_t size,
unsigned long mask, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_handle = DMA_ERROR_CODE;
void *vaddr;
void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned int npages;
nio_pages = size >> tbl->it_page_shift;
io_order = get_iommu_order(size, tbl);
mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
- mask >> tbl->it_page_shift, io_order, NULL);
+ mask >> tbl->it_page_shift, io_order, 0);
if (mapping == DMA_ERROR_CODE) {
free_pages((unsigned long)ret, order);
return NULL;
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
void *ret;
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
static void vio_dma_iommu_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct vio_dev *viodev = to_vio_dev(dev);
struct iommu_table *tbl;
static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
unsigned long *io_pte, base_pte;
base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
- if (unlikely(dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs)))
+ if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING))
base_pte &= ~CBE_IOPTE_SO_RW;
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
__set_bit(0, window->table.it_map);
tce_build_cell(&window->table, window->table.it_offset, 1,
- (unsigned long)iommu->pad_page, DMA_TO_DEVICE, NULL);
+ (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);
return window;
}
static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (iommu_fixed_is_weak)
return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
static void dma_fixed_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (iommu_fixed_is_weak)
iommu_free_coherent(cell_get_iommu_table(dev), size, vaddr,
static dma_addr_t dma_fixed_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
return dma_direct_ops.map_page(dev, page, offset, size,
direction, attrs);
else
static void dma_fixed_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
dma_direct_ops.unmap_page(dev, dma_addr, size, direction,
attrs);
else
static int dma_fixed_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
return dma_direct_ops.map_sg(dev, sg, nents, direction, attrs);
else
return ppc_iommu_map_sg(dev, cell_get_iommu_table(dev), sg,
static void dma_fixed_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- if (iommu_fixed_is_weak == dma_get_attr(DMA_ATTR_WEAK_ORDERING, attrs))
+ if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
dma_direct_ops.unmap_sg(dev, sg, nents, direction, attrs);
else
ppc_iommu_unmap_sg(cell_get_iommu_table(dev), sg, nents,
pciep = of_find_node_by_type(NULL, "pcie-endpoint");
if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
- iommu_fixed_is_weak = 1;
+ iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;
of_node_put(pciep);
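A subtlety in the cell fixed-mapping hunks: dma_fixed_map_page() and friends
now compare iommu_fixed_is_weak for equality against the masked attribute, and
(attrs & DMA_ATTR_WEAK_ORDERING) evaluates to either 0 or the attribute's bit
value — never 1. That is why the parse routine above stores
DMA_ATTR_WEAK_ORDERING itself rather than 1. Sketched:

	/* the flag holds the bit value, so equality with the mask works */
	iommu_fixed_is_weak = DMA_ATTR_WEAK_ORDERING;
	....
	if (iommu_fixed_is_weak == (attrs & DMA_ATTR_WEAK_ORDERING))
		/* request matches the fixed window's ordering */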
static int iobmap_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u32 *ip;
u32 rpn;
static void *dma_npu_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
return NULL;
static void dma_npu_free(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
}
static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
return 0;
static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
NPU_DMA_OP_UNSUPPORTED();
return 0;
static int pnv_ioda1_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
attrs);
static int pnv_ioda2_tce_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int ret = pnv_tce_build(tbl, index, npages, uaddr, direction,
attrs);
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 proto_tce = iommu_direction_to_tce_perm(direction);
u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
extern struct pci_ops pnv_pci_ops;
extern int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void pnv_tce_free(struct iommu_table *tbl, long index, long npages);
extern int pnv_tce_xchg(struct iommu_table *tbl, long index,
unsigned long *hpa, enum dma_data_direction *direction);
*/
static void * ps3_alloc_coherent(struct device *_dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int result;
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
}
static void ps3_free_coherent(struct device *_dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
static dma_addr_t ps3_sb_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
static dma_addr_t ps3_ioc0_map_page(struct device *_dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
}
static void ps3_unmap_page(struct device *_dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction direction, unsigned long attrs)
{
struct ps3_system_bus_device *dev = ps3_dev_to_system_bus_dev(_dev);
int result;
}
static int ps3_sb_map_sg(struct device *_dev, struct scatterlist *sgl,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
static int ps3_ioc0_map_sg(struct device *_dev, struct scatterlist *sg,
int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG();
return 0;
}
static void ps3_sb_unmap_sg(struct device *_dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
#if defined(CONFIG_PS3_DYNAMIC_DMA)
BUG_ON("do");
static void ps3_ioc0_unmap_sg(struct device *_dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG();
}
static int tce_build_pSeries(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 proto_tce;
__be64 *tcep, *tces;
static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce, tce;
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 rc = 0;
u64 proto_tce;
static int dart_build(struct iommu_table *tbl, long index,
long npages, unsigned long uaddr,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned int *dp, *orig_dp;
unsigned int rpn;
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/io.h>
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long nr_pages, iommu_page_index;
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long iommu_page_index;
static void *s390_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
struct page *page;
pa = page_to_phys(page);
memset((void *) pa, 0, size);
- map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, NULL);
+ map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, map)) {
free_pages(pa, get_order(size));
return NULL;
static void s390_dma_free(struct device *dev, size_t size,
void *pa, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
size = PAGE_ALIGN(size);
atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
- s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
+ s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
free_pages((unsigned long) pa, get_order(size));
}
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int mapped_elements = 0;
struct scatterlist *s;
for_each_sg(sg, s, nr_elements, i) {
struct page *page = sg_page(s);
s->dma_address = s390_dma_map_pages(dev, page, s->offset,
- s->length, dir, NULL);
+ s->length, dir, 0);
if (!dma_mapping_error(dev, s->dma_address)) {
s->dma_length = s->length;
mapped_elements++;
for_each_sg(sg, s, mapped_elements, i) {
if (s->dma_address)
s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
- dir, NULL);
+ dir, 0);
s->dma_address = 0;
s->dma_length = 0;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nr_elements, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nr_elements, i) {
- s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir,
+ 0);
s->dma_address = 0;
s->dma_length = 0;
}
/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#endif /* __ASM_SH_DMA_MAPPING_H */
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t addr = page_to_phys(page) + offset;
static int nommu_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret, *ret_nocache;
int order = get_order(size);
void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int order = get_order(size);
unsigned long pfn = dma_handle >> PAGE_SHIFT;
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long order, first_page;
struct iommu *iommu;
static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
unsigned long order, npages;
static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
struct strbuf *strbuf;
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags, ctx;
struct scatterlist *sg;
*/
static void *sbus_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct platform_device *op = to_platform_device(dev);
unsigned long len_total = PAGE_ALIGN(len);
}
static void sbus_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, struct dma_attrs *attrs)
+ dma_addr_t ba, unsigned long attrs)
{
struct resource *res;
struct page *pgv;
static dma_addr_t sbus_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t len,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = page_address(page) + offset;
}
static void sbus_unmap_page(struct device *dev, dma_addr_t ba, size_t n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
mmu_release_scsi_one(dev, ba, n);
}
static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
mmu_get_scsi_sgl(dev, sg, n);
return n;
}
static void sbus_unmap_sg(struct device *dev, struct scatterlist *sg, int n,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
mmu_release_scsi_sgl(dev, sg, n);
}
*/
static void *pci32_alloc_coherent(struct device *dev, size_t len,
dma_addr_t *pba, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long len_total = PAGE_ALIGN(len);
void *va;
* past this call are illegal.
*/
static void pci32_free_coherent(struct device *dev, size_t n, void *p,
- dma_addr_t ba, struct dma_attrs *attrs)
+ dma_addr_t ba, unsigned long attrs)
{
struct resource *res;
static dma_addr_t pci32_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
/* IIep is write-through, not flushing. */
return page_to_phys(page) + offset;
}
static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
if (dir != PCI_DMA_TODEVICE)
dma_make_coherent(ba, PAGE_ALIGN(size));
*/
static int pci32_map_sg(struct device *device, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int n;
*/
static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int n;
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long flags, order, first_page, npages, n;
struct iommu *iommu;
}
static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
- dma_addr_t dvma, struct dma_attrs *attrs)
+ dma_addr_t dvma, unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t sz,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu *iommu;
unsigned long flags, npages, oaddr;
static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot;
static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct pci_pbm_info *pbm;
struct scatterlist *sg;
}
PERCPU_SECTION(SMP_CACHE_BYTES)
+#ifdef CONFIG_JUMP_LABEL
+ . = ALIGN(PAGE_SIZE);
+ .exit.text : {
+ EXIT_TEXT
+ }
+#endif
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
BSS_SECTION(0, 0, 0)
static void *tile_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 dma_mask = (dev && dev->coherent_dma_mask) ?
dev->coherent_dma_mask : DMA_BIT_MASK(32);
*/
static void tile_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
homecache_free_pages((unsigned long)vaddr, get_order(size));
}
static int tile_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static void tile_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static dma_addr_t tile_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
static void tile_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
static void *tile_pci_dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int node = dev_to_node(dev);
int order = get_order(size);
*/
static void tile_pci_dma_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
homecache_free_pages((unsigned long)vaddr, get_order(size));
}
static int tile_pci_dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static void tile_pci_dma_unmap_sg(struct device *dev,
struct scatterlist *sglist, int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
static dma_addr_t tile_pci_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
static void tile_pci_dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
BUG_ON(!valid_dma_direction(direction));
#ifdef CONFIG_SWIOTLB
static void *tile_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
gfp |= GFP_DMA;
return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
static void tile_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
/* "Init" is divided into two areas with very different virtual addresses. */
INIT_TEXT_SECTION(PAGE_SIZE)
+ /*
+ * Some things, like the __jump_table, may contain symbol references
+ * to __exit text, so include such text in the final image if so.
+ * In that case we also override the _einittext from INIT_TEXT_SECTION.
+ */
+#ifdef CONFIG_JUMP_LABEL
+ .exit.text : {
+ EXIT_TEXT
+ _einittext = .;
+ }
+#endif
+
/* Now we skip back to PAGE_OFFSET for the data. */
. = (. - TEXT_OFFSET + PAGE_OFFSET);
#undef LOAD_OFFSET
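The sparc and tile linker-script additions above exist because a static key
used in __exit code leaves a __jump_table entry pointing into .exit.text; if
that text were discarded at link time, the entry would dangle. A hedged
illustration of code that would trigger this (my_key and my_exit are
hypothetical):

#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/printk.h>

static DEFINE_STATIC_KEY_FALSE(my_key);

static void __exit my_exit(void)
{
	/* emits a __jump_table entry whose target lies in .exit.text */
	if (static_branch_unlikely(&my_key))
		pr_info("my_key was enabled\n");
}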
static void *unicore_swiotlb_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
}
static void unicore_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
}
#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
-#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <linux/dma-contiguous.h>
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
*/
static inline int mmap_is_ia32(void)
{
- return config_enabled(CONFIG_X86_32) ||
- (config_enabled(CONFIG_COMPAT) &&
+ return IS_ENABLED(CONFIG_X86_32) ||
+ (IS_ENABLED(CONFIG_COMPAT) &&
test_thread_flag(TIF_ADDR32));
}
static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
{
- if (config_enabled(CONFIG_X86_32))
+ if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
- else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
/* See comment in copy_fxregs_to_kernel() below. */
{
int err;
- if (config_enabled(CONFIG_X86_32)) {
+ if (IS_ENABLED(CONFIG_X86_32)) {
err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
- if (config_enabled(CONFIG_AS_FXSAVEQ)) {
+ if (IS_ENABLED(CONFIG_AS_FXSAVEQ)) {
err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
} else {
/* See comment in copy_fxregs_to_kernel() below. */
static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
- if (config_enabled(CONFIG_X86_32))
+ if (IS_ENABLED(CONFIG_X86_32))
return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
- else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
/* See comment in copy_fxregs_to_kernel() below. */
static inline void copy_fxregs_to_kernel(struct fpu *fpu)
{
- if (config_enabled(CONFIG_X86_32))
+ if (IS_ENABLED(CONFIG_X86_32))
asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
- else if (config_enabled(CONFIG_AS_FXSAVEQ))
+ else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
else {
/* Using "rex64; fxsave %0" is broken because, if the memory
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
- return !config_enabled(CONFIG_IA32_EMULATION) ||
+ return !IS_ENABLED(CONFIG_IA32_EMULATION) ||
!(mm->context.ia32_compat == TIF_IA32);
}
#else
extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void x86_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs);
+ unsigned long attrs);
#endif /* _ASM_X86_SWIOTLB_H */
#define _ASM_X86_XEN_PAGE_COHERENT_H
#include <asm/page.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vstart = (void*)__get_free_pages(flags, get_order(size));
*dma_handle = virt_to_phys(vstart);
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
free_pages((unsigned long) cpu_addr, get_order(size));
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs) { }
+ enum dma_data_direction dir, unsigned long attrs) { }
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs) { }
+ unsigned long attrs) { }
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir) { }
static dma_addr_t gart_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long bus;
phys_addr_t paddr = page_to_phys(page) + offset;
*/
static void gart_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long iommu_page;
int npages;
* Wrapper for pci_unmap_single working with scatterlists.
*/
static void gart_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s;
int i;
for_each_sg(sg, s, nents, i) {
if (!s->dma_length || !s->length)
break;
- gart_unmap_page(dev, s->dma_address, s->dma_length, dir, NULL);
+ gart_unmap_page(dev, s->dma_address, s->dma_length, dir, 0);
}
}
addr = dma_map_area(dev, addr, s->length, dir, 0);
if (addr == bad_dma_addr) {
if (i > 0)
- gart_unmap_sg(dev, sg, i, dir, NULL);
+ gart_unmap_sg(dev, sg, i, dir, 0);
nents = 0;
sg[0].dma_length = 0;
break;
* Merge chunks that have page aligned sizes into a continuous mapping.
*/
static int gart_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *s, *ps, *start_sg, *sgmap;
int need = 0, nextneed, i, out, start;
error:
flush_gart();
- gart_unmap_sg(dev, sg, out, dir, NULL);
+ gart_unmap_sg(dev, sg, out, dir, 0);
/* When it was forced or merged try again in a dumb way */
if (force_iommu || iommu_merge) {
/* allocate and map a coherent mapping */
static void *
gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
- gfp_t flag, struct dma_attrs *attrs)
+ gfp_t flag, unsigned long attrs)
{
dma_addr_t paddr;
unsigned long align_mask;
/* free a coherent mapping */
static void
gart_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, unsigned long attrs)
{
- gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
+ gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0);
dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
*/
static int __init parse_lapic(char *arg)
{
- if (config_enabled(CONFIG_X86_32) && !arg)
+ if (IS_ENABLED(CONFIG_X86_32) && !arg)
force_enable_local_apic = 1;
else if (arg && !strncmp(arg, "notscdeadline", 13))
setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
struct apic_chip_data *data = irq_data->chip_data;
int err, irq = irq_data->irq;
- if (!config_enabled(CONFIG_SMP))
+ if (!IS_ENABLED(CONFIG_SMP))
return -EPERM;
if (!cpumask_intersects(dest, cpu_online_mask))
struct task_struct *tsk = current;
int ia32_fxstate = (buf != buf_fx);
- ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
- config_enabled(CONFIG_IA32_EMULATION));
+ ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
if (!access_ok(VERIFY_WRITE, buf, size))
return -EACCES;
u64 xfeatures = 0;
int fx_only = 0;
- ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
- config_enabled(CONFIG_IA32_EMULATION));
+ ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
if (!buf) {
fpu__clear(fpu);
fx_sw_reserved.xfeatures = xfeatures_mask;
fx_sw_reserved.xstate_size = fpu_user_xstate_size;
- if (config_enabled(CONFIG_IA32_EMULATION) ||
- config_enabled(CONFIG_X86_32)) {
+ if (IS_ENABLED(CONFIG_IA32_EMULATION) ||
+ IS_ENABLED(CONFIG_X86_32)) {
int fsave_header_size = sizeof(struct fregs_state);
fx_sw_reserved_ia32 = fx_sw_reserved;
static void calgary_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems,enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
struct scatterlist *s;
static int calgary_map_sg(struct device *dev, struct scatterlist *sg,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
struct scatterlist *s;
return nelems;
error:
- calgary_unmap_sg(dev, sg, nelems, dir, NULL);
+ calgary_unmap_sg(dev, sg, nelems, dir, 0);
for_each_sg(sg, s, nelems, i) {
sg->dma_address = DMA_ERROR_CODE;
sg->dma_length = 0;
static dma_addr_t calgary_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vaddr = page_address(page) + offset;
unsigned long uaddr;
static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct iommu_table *tbl = find_iommu_table(dev);
unsigned int npages;
}
static void* calgary_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
void *ret = NULL;
dma_addr_t mapping;
static void calgary_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned int npages;
struct iommu_table *tbl = find_iommu_table(dev);
}
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long dma_mask;
struct page *page;
}
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_addr, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, unsigned long attrs)
{
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = virt_to_page(vaddr);
static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t bus = page_to_phys(page) + offset;
WARN_ON(size == 0);
*/
static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vaddr;
void x86_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
swiotlb_free_coherent(dev, size, vaddr, dma_addr);
buf = (void __user *)buf_val;
} get_user_catch(err);
- err |= fpu__restore_sig(buf, config_enabled(CONFIG_X86_32));
+ err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
force_iret();
struct fpu *fpu = &current->thread.fpu;
/* redzone */
- if (config_enabled(CONFIG_X86_64))
+ if (IS_ENABLED(CONFIG_X86_64))
sp -= 128;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
- } else if (config_enabled(CONFIG_X86_32) &&
+ } else if (IS_ENABLED(CONFIG_X86_32) &&
!onsigstack &&
(regs->ss & 0xffff) != __USER_DS &&
!(ka->sa.sa_flags & SA_RESTORER) &&
}
if (fpu->fpstate_active) {
- sp = fpu__alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
+ sp = fpu__alloc_mathframe(sp, IS_ENABLED(CONFIG_X86_32),
&buf_fx, &math_size);
*fpstate = (void __user *)sp;
}
static inline int is_ia32_compat_frame(void)
{
- return config_enabled(CONFIG_IA32_EMULATION) &&
+ return IS_ENABLED(CONFIG_IA32_EMULATION) &&
test_thread_flag(TIF_IA32);
}
static inline int is_ia32_frame(void)
{
- return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+ return IS_ENABLED(CONFIG_X86_32) || is_ia32_compat_frame();
}
static inline int is_x32_frame(void)
{
- return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+ return IS_ENABLED(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
}
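These frame helpers rely on IS_ENABLED() expanding to a compile-time
constant 1 or 0 (1 when the option is built-in or modular), so the
disabled branch is discarded by the compiler while still being parsed
and type-checked. A minimal sketch of the idiom (CONFIG_FOO and
foo_do_extra_work() are illustrative placeholders, not from this
series):

	if (IS_ENABLED(CONFIG_FOO))
		foo_do_extra_work();	/* compiled out when CONFIG_FOO=n */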
static int
size_t size,
dma_addr_t *dma_handle,
gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *vaddr;
}
static void *vmd_alloc(struct device *dev, size_t size, dma_addr_t *addr,
- gfp_t flag, struct dma_attrs *attrs)
+ gfp_t flag, unsigned long attrs)
{
return vmd_dma_ops(dev)->alloc(to_vmd_dev(dev), size, addr, flag,
attrs);
}
static void vmd_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t addr, struct dma_attrs *attrs)
+ dma_addr_t addr, unsigned long attrs)
{
return vmd_dma_ops(dev)->free(to_vmd_dev(dev), size, vaddr, addr,
attrs);
static int vmd_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return vmd_dma_ops(dev)->mmap(to_vmd_dev(dev), vma, cpu_addr, addr,
size, attrs);
static int vmd_get_sgtable(struct device *dev, struct sg_table *sgt,
void *cpu_addr, dma_addr_t addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return vmd_dma_ops(dev)->get_sgtable(to_vmd_dev(dev), sgt, cpu_addr,
addr, size, attrs);
static dma_addr_t vmd_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return vmd_dma_ops(dev)->map_page(to_vmd_dev(dev), page, offset, size,
dir, attrs);
}
static void vmd_unmap_page(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
vmd_dma_ops(dev)->unmap_page(to_vmd_dev(dev), addr, size, dir, attrs);
}
static int vmd_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
return vmd_dma_ops(dev)->map_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}
static void vmd_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
vmd_dma_ops(dev)->unmap_sg(to_vmd_dev(dev), sg, nents, dir, attrs);
}
static void *xtensa_dma_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long ret;
unsigned long uncached = 0;
}
static void xtensa_dma_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long)vaddr +
XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t dma_handle = page_to_phys(page) + offset;
static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
static void xtensa_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *s;
int i;
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
-config BLK_DEV_DAX
- bool "Block device DAX support"
- depends on FS_DAX
- depends on BROKEN
- help
- When DAX support is available (CONFIG_FS_DAX) raw block
- devices can also support direct userspace access to the
- storage capacity via MMAP(2) similar to a file on a
- DAX-enabled filesystem. However, the DAX I/O-path disables
- some standard I/O-statistics, and the MMAP(2) path has some
- operational differences due to bypassing the page
- cache. If in doubt, say N.
-
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
case BCMA_HOSTTYPE_PCI:
memset(out, 0, sizeof(struct ssb_sprom));
/* On BCM47XX all PCI buses share the same domain */
- if (config_enabled(CONFIG_BCM47XX))
+ if (IS_ENABLED(CONFIG_BCM47XX))
snprintf(buf, sizeof(buf), "pci/%u/%u/",
bus->host_pci->bus->number + 1,
PCI_SLOT(bus->host_pci->devfn));
config FPGA_MGR_ZYNQ_FPGA
tristate "Xilinx Zynq FPGA"
+ depends on HAS_DMA
help
FPGA manager driver support for Xilinx Zynq FPGAs.
ret = dma_mmap_attrs(to_dma_dev(helper->dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-attrs.h>
#include <linux/of.h>
#include <drm/drmP.h>
struct mutex cmdlist_mutex;
dma_addr_t cmdlist_pool;
void *cmdlist_pool_virt;
- struct dma_attrs cmdlist_dma_attrs;
+ unsigned long cmdlist_dma_attrs;
/* runqueue*/
struct g2d_runqueue_node *runqueue_node;
int ret;
struct g2d_buf_info *buf_info;
- init_dma_attrs(&g2d->cmdlist_dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
- &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_dma_attrs);
if (!g2d->cmdlist_pool_virt) {
dev_err(dev, "failed to allocate dma memory\n");
return -ENOMEM;
err:
dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
}
dma_free_attrs(to_dma_dev(subdrv->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
- g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
+ g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
}
}
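The g2d conversion above shows the general pattern for drivers that
keep attributes in a long-lived structure: the init_dma_attrs() and
dma_set_attr() pair collapses into a plain assignment, and each &attrs
argument becomes the value itself. As a hedged before/after sketch
(the foo names are placeholders):

	/* before */
	init_dma_attrs(&foo->dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &foo->dma_attrs);

	/* after */
	foo->dma_attrs = DMA_ATTR_WRITE_COMBINE;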
static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
{
struct drm_device *dev = exynos_gem->base.dev;
- enum dma_attr attr;
+ unsigned long attr;
unsigned int nr_pages;
struct sg_table sgt;
int ret = -ENOMEM;
return 0;
}
- init_dma_attrs(&exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs = 0;
/*
* if EXYNOS_BO_CONTIG, fully physically contiguous memory
* as possible.
*/
if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
- dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
/*
* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping
else
attr = DMA_ATTR_NON_CONSISTENT;
- dma_set_attr(attr, &exynos_gem->dma_attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs |= attr;
+ exynos_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
nr_pages = exynos_gem->size >> PAGE_SHIFT;
exynos_gem->cookie = dma_alloc_attrs(to_dma_dev(dev), exynos_gem->size,
&exynos_gem->dma_addr, GFP_KERNEL,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (!exynos_gem->cookie) {
DRM_ERROR("failed to allocate buffer.\n");
goto err_free;
ret = dma_get_sgtable_attrs(to_dma_dev(dev), &sgt, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to get sgtable.\n");
goto err_dma_free;
sg_free_table(&sgt);
err_dma_free:
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
- exynos_gem->dma_addr, &exynos_gem->dma_attrs);
+ exynos_gem->dma_addr, exynos_gem->dma_attrs);
err_free:
drm_free_large(exynos_gem->pages);
dma_free_attrs(to_dma_dev(dev), exynos_gem->size, exynos_gem->cookie,
(dma_addr_t)exynos_gem->dma_addr,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
drm_free_large(exynos_gem->pages);
}
ret = dma_mmap_attrs(to_dma_dev(drm_dev), vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
- &exynos_gem->dma_attrs);
+ exynos_gem->dma_attrs);
if (ret < 0) {
DRM_ERROR("failed to mmap.\n");
return ret;
void *cookie;
void __iomem *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
struct page **pages;
struct sg_table *sgt;
};
obj = &mtk_gem->base;
- init_dma_attrs(&mtk_gem->dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs = DMA_ATTR_WRITE_COMBINE;
if (!alloc_kmap)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
mtk_gem->cookie = dma_alloc_attrs(priv->dma_dev, obj->size,
&mtk_gem->dma_addr, GFP_KERNEL,
- &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs);
if (!mtk_gem->cookie) {
DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
ret = -ENOMEM;
drm_prime_gem_destroy(obj, mtk_gem->sg);
else
dma_free_attrs(priv->dma_dev, obj->size, mtk_gem->cookie,
- mtk_gem->dma_addr, &mtk_gem->dma_attrs);
+ mtk_gem->dma_addr, mtk_gem->dma_attrs);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
- mtk_gem->dma_addr, obj->size, &mtk_gem->dma_attrs);
+ mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
if (ret)
drm_gem_vm_close(vma);
ret = dma_get_sgtable_attrs(priv->dma_dev, sgt, mtk_gem->cookie,
mtk_gem->dma_addr, obj->size,
- &mtk_gem->dma_attrs);
+ mtk_gem->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
void *cookie;
void *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
struct sg_table *sg;
};
}
if (priv->vram.paddr) {
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
drm_mm_takedown(&priv->vram.mm);
dma_free_attrs(dev, priv->vram.size, NULL,
- priv->vram.paddr, &attrs);
+ priv->vram.paddr, attrs);
}
component_unbind_all(dev, ddev);
}
if (size) {
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long attrs = 0;
void *p;
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
+ attrs |= DMA_ATTR_WRITE_COMBINE;
/* note that for no-kernel-mapping, the vaddr returned
* is bogus, but non-null if allocation succeeded:
*/
p = dma_alloc_attrs(dev->dev, size,
- &priv->vram.paddr, GFP_KERNEL, &attrs);
+ &priv->vram.paddr, GFP_KERNEL, attrs);
if (!p) {
dev_err(dev->dev, "failed to allocate VRAM\n");
priv->vram.paddr = 0;
u16 iommu_bit;
/* Only used by DMA API */
- struct dma_attrs attrs;
+ unsigned long attrs;
};
#define gk20a_instmem(p) container_of((p), struct gk20a_instmem, base)
goto out;
dma_free_attrs(dev, node->base.mem.size << PAGE_SHIFT, node->base.vaddr,
- node->handle, &imem->attrs);
+ node->handle, imem->attrs);
out:
return node;
node->base.vaddr = dma_alloc_attrs(dev, npages << PAGE_SHIFT,
&node->handle, GFP_KERNEL,
- &imem->attrs);
+ imem->attrs);
if (!node->base.vaddr) {
nvkm_error(subdev, "cannot allocate DMA memory\n");
return -ENOMEM;
nvkm_info(&imem->base.subdev, "using IOMMU\n");
} else {
- init_dma_attrs(&imem->attrs);
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &imem->attrs);
- dma_set_attr(DMA_ATTR_WEAK_ORDERING, &imem->attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &imem->attrs);
+ imem->attrs = DMA_ATTR_NON_CONSISTENT |
+ DMA_ATTR_WEAK_ORDERING |
+ DMA_ATTR_WRITE_COMBINE;
nvkm_info(&imem->base.subdev, "using DMA API\n");
}
#include <drm/drm_gem.h>
#include <drm/drm_vma_manager.h>
-#include <linux/dma-attrs.h>
-
#include "rockchip_drm_drv.h"
#include "rockchip_drm_gem.h"
struct drm_gem_object *obj = &rk_obj->base;
struct drm_device *drm = obj->dev;
- init_dma_attrs(&rk_obj->dma_attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
+ rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;
if (!alloc_kmap)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);
+ rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
&rk_obj->dma_addr, GFP_KERNEL,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
if (!rk_obj->kvaddr) {
DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
return -ENOMEM;
struct drm_device *drm = obj->dev;
dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
}
static int rockchip_drm_gem_object_mmap(struct drm_gem_object *obj,
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
- obj->size, &rk_obj->dma_attrs);
+ obj->size, rk_obj->dma_attrs);
if (ret)
drm_gem_vm_close(vma);
ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
rk_obj->dma_addr, obj->size,
- &rk_obj->dma_attrs);
+ rk_obj->dma_attrs);
if (ret) {
DRM_ERROR("failed to allocate sgt, %d\n", ret);
kfree(sgt);
{
struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
- if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs))
+ if (rk_obj->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
return NULL;
return rk_obj->kvaddr;
void *kvaddr;
dma_addr_t dma_addr;
- struct dma_attrs dma_attrs;
+ unsigned long dma_attrs;
};
struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
-#include <linux/dma-attrs.h>
#include <linux/slab.h>
#include <rdma/ib_umem_odp.h>
unsigned long npages;
int ret;
int i;
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long dma_attrs = 0;
struct scatterlist *sg, *sg_list_start;
int need_release = 0;
if (dmasync)
- dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ dma_attrs |= DMA_ATTR_WRITE_BARRIER;
if (!size)
return ERR_PTR(-EINVAL);
umem->sg_head.sgl,
umem->npages,
DMA_BIDIRECTIONAL,
- &attrs);
+ dma_attrs);
if (umem->nmap <= 0) {
ret = -ENOMEM;
static dma_addr_t map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = page_to_phys(page) + offset;
struct protection_domain *domain;
* The exported unmap_single function for dma_ops.
*/
static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct protection_domain *domain;
struct dma_ops_domain *dma_dom;
*/
static int map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int mapped_pages = 0, npages = 0, prot = 0, i;
struct protection_domain *domain;
*/
static void unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct protection_domain *domain;
struct dma_ops_domain *dma_dom;
*/
static void *alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
u64 dma_mask = dev->coherent_dma_mask;
struct protection_domain *domain;
*/
static void free_coherent(struct device *dev, size_t size,
void *virt_addr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct protection_domain *domain;
struct dma_ops_domain *dma_dom;
* or NULL on failure.
*/
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+ unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t))
{
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
} else {
size = ALIGN(size, min_size);
}
- if (dma_get_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, attrs))
+ if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
alloc_sizes = min_size;
count = PAGE_ALIGN(size) >> PAGE_SHIFT;
}
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
}
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
/*
* The scatterlist segments are mapped into a single
static dma_addr_t intel_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return __intel_map_single(dev, page_to_phys(page) + offset, size,
dir, *dev->dma_mask);
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct page *page = NULL;
int order;
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
int order;
struct page *page = virt_to_page(vaddr);
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
unsigned long nrpages = 0;
}
static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
int i;
struct dmar_domain *domain;
pending_reg += gic_reg_step;
intrmask_reg += gic_reg_step;
- if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+ if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
continue;
pending[i] |= (u64)gic_read(pending_reg) << 32;
ret = cxd2841er_get_carrier_offset_i(
priv, p->bandwidth_hz,
&carrier_offset);
+ if (ret)
+ return ret;
break;
case SYS_DVBT:
ret = cxd2841er_get_carrier_offset_t(
priv, p->bandwidth_hz,
&carrier_offset);
+ if (ret)
+ return ret;
break;
case SYS_DVBT2:
ret = cxd2841er_get_carrier_offset_t2(
priv, p->bandwidth_hz,
&carrier_offset);
+ if (ret)
+ return ret;
break;
case SYS_DVBC_ANNEX_A:
ret = cxd2841er_get_carrier_offset_c(
priv, &carrier_offset);
+ if (ret)
+ return ret;
break;
default:
dev_dbg(&priv->i2c->dev,
__func__, priv->system);
return -EINVAL;
}
- if (ret)
- return ret;
dev_dbg(&priv->i2c->dev, "%s(): carrier offset %d\n",
__func__, carrier_offset);
p->frequency += carrier_offset;
struct video_device *vfd_enc;
struct resource *res;
int i, j, ret;
- DEFINE_DMA_ATTRS(attrs);
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
goto err_enc_reg;
}
- /* Avoid the iommu eat big hunks */
- dma_set_attr(DMA_ATTR_ALLOC_SINGLE_PAGES, &attrs);
-
mtk_v4l2_debug(0, "encoder registered as /dev/video%d",
vfd_enc->num);
*/
void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
{
- if (ctx && ctx->node[0]) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ if (ctx && ctx->node[0])
dma_free_attrs(ctx->bdisp_dev->dev,
sizeof(struct bdisp_node) * MAX_NB_NODE,
- ctx->node[0], ctx->node_paddr[0], &attrs);
- }
+ ctx->node[0], ctx->node_paddr[0],
+ DMA_ATTR_WRITE_COMBINE);
}
/**
unsigned int i, node_size = sizeof(struct bdisp_node);
void *base;
dma_addr_t paddr;
- DEFINE_DMA_ATTRS(attrs);
/* Allocate all the nodes within a single memory page */
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
- GFP_KERNEL | GFP_DMA, &attrs);
+ GFP_KERNEL | GFP_DMA, DMA_ATTR_WRITE_COMBINE);
if (!base) {
dev_err(dev, "%s no mem\n", __func__);
return -ENOMEM;
{
int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
- if (bdisp_h_filter[0].virt) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ if (bdisp_h_filter[0].virt)
dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
- bdisp_h_filter[0].paddr, &attrs);
- }
+ bdisp_h_filter[0].paddr, DMA_ATTR_WRITE_COMBINE);
}
/**
unsigned int i, size;
void *base;
dma_addr_t paddr;
- DEFINE_DMA_ATTRS(attrs);
/* Allocate all the filters within a single memory page */
size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
+ base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA,
+ DMA_ATTR_WRITE_COMBINE);
if (!base)
return -ENOMEM;
unsigned long size;
void *cookie;
dma_addr_t dma_addr;
- struct dma_attrs attrs;
+ unsigned long attrs;
enum dma_data_direction dma_dir;
struct sg_table *dma_sgt;
struct frame_vector *vec;
kfree(buf->sgt_base);
}
dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- &buf->attrs);
+ buf->attrs);
put_device(buf->dev);
kfree(buf);
}
-static void *vb2_dc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
{
return ERR_PTR(-ENOMEM);
if (attrs)
- buf->attrs = *attrs;
+ buf->attrs = attrs;
buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, &buf->attrs);
+ GFP_KERNEL | gfp_flags, buf->attrs);
if (!buf->cookie) {
dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
kfree(buf);
return ERR_PTR(-ENOMEM);
}
- if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->attrs))
+ if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
buf->vaddr = buf->cookie;
/* Prevent the device from being released while the buffer is used */
vma->vm_pgoff = 0;
ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, &buf->attrs);
+ buf->dma_addr, buf->size, buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
}
ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
- buf->size, &buf->attrs);
+ buf->size, buf->attrs);
if (ret < 0) {
dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
kfree(sgt);
struct page **pages;
if (sgt) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
/*
* No need to sync to CPU, it's already synced to the CPU
* since the finish() memop will have been called before this.
*/
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
pages = frame_vector_pages(buf->vec);
/* sgt should exist only if vector contains pages... */
BUG_ON(IS_ERR(pages));
struct sg_table *sgt;
unsigned long contig_size;
unsigned long dma_align = dma_get_cache_alignment();
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
/* Only cache aligned DMA transfers are reliable */
if (!IS_ALIGNED(vaddr | size, dma_align)) {
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (sgt->nents <= 0) {
pr_err("failed to map scatterlist\n");
ret = -EIO;
fail_map_sg:
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
fail_sgt_init:
sg_free_table(sgt);
return 0;
}
-static void *vb2_dma_sg_alloc(struct device *dev, const struct dma_attrs *dma_attrs,
+static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
{
struct sg_table *sgt;
int ret;
int num_pages;
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
if (WARN_ON(dev == NULL))
return NULL;
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (!sgt->nents)
goto fail_map;
int i = buf->num_pages;
if (atomic_dec_and_test(&buf->refcount)) {
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
buf->num_pages);
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
- DEFINE_DMA_ATTRS(attrs);
struct frame_vector *vec;
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
buf = kzalloc(sizeof *buf, GFP_KERNEL);
if (!buf)
return NULL;
* prepare() memop is called.
*/
sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
- buf->dma_dir, &attrs);
+ buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
if (!sgt->nents)
goto userptr_fail_map;
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = &buf->sg_table;
int i = buf->num_pages;
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
__func__, buf->num_pages);
dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
- &attrs);
+ DMA_ATTR_SKIP_CPU_SYNC);
if (buf->vaddr)
vm_unmap_ram(buf->vaddr, buf->num_pages);
sg_free_table(buf->dma_sgt);
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(struct device *dev, const struct dma_attrs *attrs,
+static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags)
{
static dma_addr_t
_mic_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct mic_device *mdev = vpdev_to_mdev(dev);
static void _mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct mic_device *mdev = vpdev_to_mdev(dev);
static void *__mic_dma_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
}
static void __mic_dma_free(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
static dma_addr_t
__mic_dma_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
static void
__mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
static int __mic_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
static void __mic_dma_unmap_sg(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scif_hw_dev *scdev = dev_get_drvdata(dev);
struct mic_device *mdev = scdev_to_mdev(scdev);
static dma_addr_t
mic_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *va = phys_to_virt(page_to_phys(page)) + offset;
struct mic_device *mdev = dev_get_drvdata(dev->parent);
static void
mic_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct mic_device *mdev = dev_get_drvdata(dev->parent);
mic_unmap_single(mdev, dma_addr, size);
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
/* Nothing more in higher memory on BCM47XX (MIPS) */
- if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
+ if (IS_ENABLED(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
ar->id.subsystem_vendor, ar->id.subsystem_device);
ath10k_info(ar, "kconfig debug %d debugfs %d tracing %d dfs %d testmode %d\n",
- config_enabled(CONFIG_ATH10K_DEBUG),
- config_enabled(CONFIG_ATH10K_DEBUGFS),
- config_enabled(CONFIG_ATH10K_TRACING),
- config_enabled(CONFIG_ATH10K_DFS_CERTIFIED),
- config_enabled(CONFIG_NL80211_TESTMODE));
+ IS_ENABLED(CONFIG_ATH10K_DEBUG),
+ IS_ENABLED(CONFIG_ATH10K_DEBUGFS),
+ IS_ENABLED(CONFIG_ATH10K_TRACING),
+ IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED),
+ IS_ENABLED(CONFIG_NL80211_TESTMODE));
firmware = ar->normal_mode_fw.fw_file.firmware;
if (firmware)
debugfs_create_file("nf_cal_period", S_IRUSR | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_nf_cal_period);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
&fops_simulate_radar);
regpair = ar->ath_common.regulatory.regpair;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
nl_dfs_reg = ar->dfs_detector->region;
wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
} else {
ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
request->dfs_region);
result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
ar->hw->netdev_features = NETIF_F_HW_CSUM;
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
/* Init ath dfs pattern detector */
ar->ath_common.debug_mask = ATH_DBG_DFS;
ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
ieee80211_unregister_hw(ar->hw);
err_dfs_detector_exit:
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
err_free:
{
ieee80211_unregister_hw(ar->hw);
- if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
+ if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
ar->dfs_detector->exit(ar->dfs_detector);
kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
phyerr->tsf_timestamp, tsf, buf_len);
/* Skip event if DFS disabled */
- if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
+ if (!IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED))
return;
ATH10K_DFS_STAT_INC(ar, pulses_total);
BIT(NL80211_IFTYPE_P2P_CLIENT);
}
- if (config_enabled(CONFIG_ATH6KL_REGDOMAIN) &&
+ if (IS_ENABLED(CONFIG_ATH6KL_REGDOMAIN) &&
test_bit(ATH6KL_FW_CAPABILITY_REGDOMAIN, ar->fw_capabilities)) {
wiphy->reg_notifier = ath6kl_cfg80211_reg_notify;
ar->wiphy->features |= NL80211_FEATURE_CELL_BASE_REG_HINTS;
struct ath_hw *ah = spec_priv->ah;
u32 rxfilter;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
if (!ath9k_hw_ops(ah)->spectral_scan_trigger) {
char buf[32];
ssize_t len;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
len = min(count, sizeof(buf) - 1);
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
+ if (IS_ENABLED(CONFIG_ATH9K_DEBUGFS)) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_P2P_GO_CTWIN;
- if (!config_enabled(CONFIG_ATH9K_TX99)) {
+ if (!IS_ENABLED(CONFIG_ATH9K_TX99)) {
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
mutex_lock(&sc->mutex);
- if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
if (sc->cur_chan->nvifs >= 1) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
mutex_lock(&sc->mutex);
- if (config_enabled(CONFIG_ATH9K_TX99)) {
+ if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
mutex_unlock(&sc->mutex);
return -EOPNOTSUPP;
}
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
sc->ps_enabled = true;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
sc->ps_enabled = false;
struct ieee80211_channel *chan;
int pos;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return -EOPNOTSUPP;
spin_lock_bh(&common->cc_lock);
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return;
mutex_lock(&sc->mutex);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
u32 rfilt;
- if (config_enabled(CONFIG_ATH9K_TX99))
+ if (IS_ENABLED(CONFIG_ATH9K_TX99))
return 0;
rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
{
struct dfs_pattern_detector *dpd;
- if (!config_enabled(CONFIG_CFG80211_CERTIFICATION_ONUS))
+ if (!IS_ENABLED(CONFIG_CFG80211_CERTIFICATION_ONUS))
return NULL;
dpd = kmalloc(sizeof(*dpd), GFP_KERNEL);
static bool dynamic_country_user_possible(struct ath_regulatory *reg)
{
- if (config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
+ if (IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING))
return true;
switch (reg->country_code) {
static bool ath_reg_dyn_country_user_allow(struct ath_regulatory *reg)
{
- if (!config_enabled(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
+ if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS))
return false;
if (!dynamic_country_user_possible(reg))
return false;
static dma_addr_t
ccio_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return ccio_map_single(dev, page_address(page) + offset, size,
direction);
*/
static void
ccio_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
unsigned long flags;
*/
static void *
ccio_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
#if 0
*/
static void
ccio_free(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- ccio_unmap_page(dev, dma_handle, size, 0, NULL);
+ ccio_unmap_page(dev, dma_handle, size, 0, 0);
free_pages((unsigned long)cpu_addr, get_order(size));
}
*/
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
*/
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
ccio_unmap_page(dev, sg_dma_address(sglist),
- sg_dma_len(sglist), direction, NULL);
+ sg_dma_len(sglist), direction, 0);
++sglist;
}
static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return sba_map_single(dev, page_address(page) + offset, size,
direction);
*/
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
* See Documentation/DMA-API-HOWTO.txt
*/
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
- gfp_t gfp, struct dma_attrs *attrs)
+ gfp_t gfp, unsigned long attrs)
{
void *ret;
*/
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
- sba_unmap_page(hwdev, dma_handle, size, 0, NULL);
+ sba_unmap_page(hwdev, dma_handle, size, 0, 0);
free_pages((unsigned long) vaddr, get_order(size));
}
*/
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
int coalesced, filled = 0;
*/
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
- enum dma_data_direction direction, struct dma_attrs *attrs)
+ enum dma_data_direction direction, unsigned long attrs)
{
struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
while (sg_dma_len(sglist) && nents--) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
- direction, NULL);
+ direction, 0);
#ifdef SBA_COLLECT_STATS
ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
* since we have enough virtual address range available. On 32-bit, we
* ioremap the config space for each bus individually.
*/
-static const bool per_bus_mapping = !config_enabled(CONFIG_64BIT);
+static const bool per_bus_mapping = !IS_ENABLED(CONFIG_64BIT);
/*
* Create a PCI config space window
static int q6v5_mpss_init_image(struct q6v5 *qproc, const struct firmware *fw)
{
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long dma_attrs = DMA_ATTR_FORCE_CONTIGUOUS;
dma_addr_t phys;
void *ptr;
int ret;
- dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
- ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
+ ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, dma_attrs);
if (!ptr) {
dev_err(qproc->dev, "failed to allocate mdt buffer\n");
return -ENOMEM;
else if (ret < 0)
dev_err(qproc->dev, "MPSS header authentication failed: %d\n", ret);
- dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);
+ dma_free_attrs(qproc->dev, fw->size, ptr, phys, dma_attrs);
return ret < 0 ? ret : 0;
}
static inline bool ar933x_uart_console_enabled(void)
{
- return config_enabled(CONFIG_SERIAL_AR933X_CONSOLE);
+ return IS_ENABLED(CONFIG_SERIAL_AR933X_CONSOLE);
}
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int ret;
np = pdev->dev.of_node;
- if (config_enabled(CONFIG_OF) && np) {
+ if (IS_ENABLED(CONFIG_OF) && np) {
id = of_alias_get_id(np, "serial");
if (id < 0) {
dev_err(&pdev->dev, "unable to get alias id, err=%d\n",
}
dma_free_attrs(fbdev->dev, rg->size, rg->token, rg->dma_handle,
- &rg->attrs);
+ rg->attrs);
rg->token = NULL;
rg->vaddr = NULL;
struct omapfb2_device *fbdev = ofbi->fbdev;
struct omapfb2_mem_region *rg;
void *token;
- DEFINE_DMA_ATTRS(attrs);
+ unsigned long attrs;
dma_addr_t dma_handle;
int r;
size = PAGE_ALIGN(size);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+ attrs = DMA_ATTR_WRITE_COMBINE;
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
- dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
+ attrs |= DMA_ATTR_NO_KERNEL_MAPPING;
DBG("allocating %lu bytes for fb %d\n", size, ofbi->id);
token = dma_alloc_attrs(fbdev->dev, size, &dma_handle,
- GFP_KERNEL, &attrs);
+ GFP_KERNEL, attrs);
if (token == NULL) {
dev_err(fbdev->dev, "failed to allocate framebuffer\n");
r = omap_vrfb_request_ctx(&rg->vrfb);
if (r) {
dma_free_attrs(fbdev->dev, size, token, dma_handle,
- &attrs);
+ attrs);
dev_err(fbdev->dev, "vrfb create ctx failed\n");
return r;
}
#endif
#include <linux/rwsem.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>
#include <video/omapfb_dss.h>
struct omapfb2_mem_region {
int id;
- struct dma_attrs attrs;
+ unsigned long attrs;
void *token;
dma_addr_t dma_handle;
u32 paddr;
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
int order = get_order(size);
void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
- dma_addr_t dev_addr, struct dma_attrs *attrs)
+ dma_addr_t dev_addr, unsigned long attrs)
{
int order = get_order(size);
phys_addr_t phys;
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = xen_phys_to_bus(phys);
*/
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t paddr = xen_bus_to_phys(dev_addr);
void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
xen_unmap_single(hwdev, dev_addr, size, dir, attrs);
}
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- if (IS_ENABLED(CONFIG_BLK_DEV_DAX) &&
- blk_queue_dax(disk->queue))
- bdev->bd_inode->i_flags = S_DAX;
- else
- bdev->bd_inode->i_flags = 0;
+ bdev->bd_inode->i_flags = 0;
if (!partno) {
ret = -ENXIO;
const unsigned long *src2, unsigned int nbits)
{
if (small_const_nbits(nbits))
- return ! ((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
+ return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
#ifdef CONFIG_S390
- else if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0)
+ if (__builtin_constant_p(nbits) && (nbits % BITS_PER_LONG) == 0)
return !memcmp(src1, src2, nbits / 8);
#endif
- else
- return __bitmap_equal(src1, src2, nbits);
+ return __bitmap_equal(src1, src2, nbits);
}
static inline int bitmap_intersects(const unsigned long *src1,
+++ /dev/null
-#ifndef _DMA_ATTR_H
-#define _DMA_ATTR_H
-
-#include <linux/bitmap.h>
-#include <linux/bitops.h>
-#include <linux/bug.h>
-
-/**
- * an enum dma_attr represents an attribute associated with a DMA
- * mapping. The semantics of each attribute should be defined in
- * Documentation/DMA-attributes.txt.
- */
-enum dma_attr {
- DMA_ATTR_WRITE_BARRIER,
- DMA_ATTR_WEAK_ORDERING,
- DMA_ATTR_WRITE_COMBINE,
- DMA_ATTR_NON_CONSISTENT,
- DMA_ATTR_NO_KERNEL_MAPPING,
- DMA_ATTR_SKIP_CPU_SYNC,
- DMA_ATTR_FORCE_CONTIGUOUS,
- DMA_ATTR_ALLOC_SINGLE_PAGES,
- DMA_ATTR_MAX,
-};
-
-#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
-
-/**
- * struct dma_attrs - an opaque container for DMA attributes
- * @flags - bitmask representing a collection of enum dma_attr
- */
-struct dma_attrs {
- unsigned long flags[__DMA_ATTRS_LONGS];
-};
-
-#define DEFINE_DMA_ATTRS(x) \
- struct dma_attrs x = { \
- .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \
- }
-
-static inline void init_dma_attrs(struct dma_attrs *attrs)
-{
- bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
-}
-
-/**
- * dma_set_attr - set a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
- if (attrs == NULL)
- return;
- BUG_ON(attr >= DMA_ATTR_MAX);
- __set_bit(attr, attrs->flags);
-}
-
-/**
- * dma_get_attr - check for a specific attribute
- * @attr: attribute to set
- * @attrs: struct dma_attrs (may be NULL)
- */
-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
- if (attrs == NULL)
- return 0;
- BUG_ON(attr >= DMA_ATTR_MAX);
- return test_bit(attr, attrs->flags);
-}
-
-#endif /* _DMA_ATTR_H */
* the arch code to take care of attributes and cache maintenance
*/
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
- struct dma_attrs *attrs, int prot, dma_addr_t *handle,
+ unsigned long attrs, int prot, dma_addr_t *handle,
void (*flush_page)(struct device *, const void *, phys_addr_t));
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
dma_addr_t *handle);
* directly as DMA mapping callbacks for simplicity
*/
void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir, unsigned long attrs);
int iommu_dma_supported(struct device *dev, u64 mask);
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
-#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
+/**
+ * List of possible attributes associated with a DMA mapping. The semantics
+ * of each attribute should be defined in Documentation/DMA-attributes.txt.
+ *
+ * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
+ * forces all pending DMA writes to complete.
+ */
+#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+/*
+ * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
+ * may be weakly ordered; that is, reads and writes may pass each other.
+ */
+#define DMA_ATTR_WEAK_ORDERING (1UL << 1)
+/*
+ * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
+ * buffered to improve performance.
+ */
+#define DMA_ATTR_WRITE_COMBINE (1UL << 2)
+/*
+ * DMA_ATTR_NON_CONSISTENT: Lets the platform choose whether to return
+ * consistent or non-consistent memory, as it sees fit.
+ */
+#define DMA_ATTR_NON_CONSISTENT (1UL << 3)
+/*
+ * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
+ * virtual mapping for the allocated buffer.
+ */
+#define DMA_ATTR_NO_KERNEL_MAPPING (1UL << 4)
+/*
+ * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
+ * the CPU cache for the given buffer, assuming that it has already been
+ * transferred to the 'device' domain.
+ */
+#define DMA_ATTR_SKIP_CPU_SYNC (1UL << 5)
+/*
+ * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
+ * in physical memory.
+ */
+#define DMA_ATTR_FORCE_CONTIGUOUS (1UL << 6)
+/*
+ * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
+ * that it's probably not worth the time to try to allocate memory in a way
+ * that gives better TLB efficiency.
+ */
+#define DMA_ATTR_ALLOC_SINGLE_PAGES (1UL << 7)
+
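Because each attribute is now a single bit in an unsigned long, callers
build a mask with bitwise OR and implementations test it with bitwise
AND. A minimal usage sketch (dev, size and dma_handle are assumed to
exist in the caller):

	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
	void *cpu_addr;

	cpu_addr = dma_alloc_attrs(dev, size, &dma_handle, GFP_KERNEL, attrs);
	if (!cpu_addr)
		return -ENOMEM;
	...
	dma_free_attrs(dev, size, cpu_addr, dma_handle, attrs);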
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
* It can be given to a device to use as a DMA source or target. A CPU cannot
struct dma_map_ops {
void* (*alloc)(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*free)(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
int (*mmap)(struct device *, struct vm_area_struct *,
- void *, dma_addr_t, size_t, struct dma_attrs *attrs);
+ void *, dma_addr_t, size_t,
+ unsigned long attrs);
int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
- dma_addr_t, size_t, struct dma_attrs *attrs);
+ dma_addr_t, size_t, unsigned long attrs);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
/*
* map_sg returns 0 on error and a value > 0 on success.
* It should never return a value < 0.
*/
int (*map_sg)(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*unmap_sg)(struct device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
void (*sync_single_for_cpu)(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction dir);
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
dma_addr_t addr;
static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
*/
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
int i, ents;
static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
kmemcheck_mark_initialized(page_address(page) + offset, size);
BUG_ON(!valid_dma_direction(dir));
- addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ addr = ops->map_page(dev, page, offset, size, dir, 0);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return addr;
BUG_ON(!valid_dma_direction(dir));
if (ops->unmap_page)
- ops->unmap_page(dev, addr, size, dir, NULL);
+ ops->unmap_page(dev, addr, size, dir, 0);
debug_dma_unmap_page(dev, addr, size, dir, false);
}
}
-#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
-#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
-#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
-#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
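Existing callers of the unadorned wrappers need no changes; the macros
above simply pass an empty attribute mask. For example (dev, buf and len
are hypothetical):

	dma_addr_t dma;

	/* Expands to dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, 0). */
	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;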
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size);
*/
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
- dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+ dma_addr_t dma_addr, size_t size,
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}
-#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *cpu_addr;
static inline void dma_free_attrs(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+ return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}
static inline void dma_free_coherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
- return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp,
+ DMA_ATTR_NON_CONSISTENT);
}
static inline void dma_free_noncoherent(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_handle)
{
- DEFINE_DMA_ATTRS(attrs);
-
- dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
- dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle,
+ DMA_ATTR_NON_CONSISTENT);
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
static inline void *dma_alloc_wc(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
+ return dma_alloc_attrs(dev, size, dma_addr, gfp,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
static inline void dma_free_wc(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
+ return dma_free_attrs(dev, size, cpu_addr, dma_addr,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
void *cpu_addr, dma_addr_t dma_addr,
size_t size)
{
- DEFINE_DMA_ATTRS(attrs);
- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
+ DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#ifndef _DYNAMIC_DEBUG_H
#define _DYNAMIC_DEBUG_H
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#include <linux/jump_label.h>
+#endif
+
/*
* An instance of this structure is created in a special
* ELF section at every dynamic debug callsite. At runtime,
#define _DPRINTK_FLAGS_DEFAULT 0
#endif
unsigned int flags:8;
+#ifdef HAVE_JUMP_LABEL
+ union {
+ struct static_key_true dd_key_true;
+ struct static_key_false dd_key_false;
+ } key;
+#endif
} __attribute__((aligned(8)));
const struct net_device *dev,
const char *fmt, ...);
-#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+#define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \
static struct _ddebug __aligned(8) \
__attribute__((section("__verbose"))) name = { \
.modname = KBUILD_MODNAME, \
.filename = __FILE__, \
.format = (fmt), \
.lineno = __LINE__, \
- .flags = _DPRINTK_FLAGS_DEFAULT, \
+ .flags = _DPRINTK_FLAGS_DEFAULT, \
+ dd_key_init(key, init) \
}
+#ifdef HAVE_JUMP_LABEL
+
+#define dd_key_init(key, init) key = (init)
+
+#ifdef DEBUG
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \
+ (STATIC_KEY_TRUE_INIT))
+
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ static_branch_likely(&descriptor.key.dd_key_true)
+#else
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \
+ (STATIC_KEY_FALSE_INIT))
+
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ static_branch_unlikely(&descriptor.key.dd_key_false)
+#endif
+
+#else
+
+#define dd_key_init(key, init)
+
+#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \
+ DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0)
+
+#ifdef DEBUG
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ likely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
+#else
+#define DYNAMIC_DEBUG_BRANCH(descriptor) \
+ unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)
+#endif
+
+#endif
+
#define dynamic_pr_debug(fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
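When jump labels are available, DYNAMIC_DEBUG_BRANCH() above compiles to a
patchable no-op (or an unconditional jump in the DEBUG case) instead of a
load-and-test of descriptor.flags, and enabling a callsite through the
dynamic debug control file patches the branch in place via
static_branch_enable(). A hedged sketch of what a callsite costs
(my_probe is hypothetical):

	static int my_probe(struct device *dev)
	{
		/* With HAVE_JUMP_LABEL this is a single no-op in the hot
		 * path until the callsite is enabled at runtime. */
		pr_debug("probing %s\n", dev_name(dev));
		return 0;
	}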
#define dynamic_dev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_dev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
#define dynamic_netdev_dbg(dev, fmt, ...) \
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
__dynamic_netdev_dbg(&descriptor, dev, fmt, \
##__VA_ARGS__); \
} while (0)
do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \
__builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\
- if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor)) \
print_hex_dump(KERN_DEBUG, prefix_str, \
prefix_type, rowsize, groupsize, \
buf, len, ascii); \
#define FENCE_TRACE(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- if (config_enabled(CONFIG_FENCE_TRACE)) \
+ if (IS_ENABLED(CONFIG_FENCE_TRACE)) \
pr_info("f %llu#%u: " fmt, \
__ff->context, __ff->seqno, ##args); \
} while (0)
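IS_ENABLED() is the <linux/kconfig.h> helper that replaces config_enabled()
at the call sites in this series. Both fold to a compile-time constant, so
the disabled branch is discarded while still being type-checked;
IS_ENABLED() additionally evaluates true for =m, which makes no difference
for the boolean options touched here. A small illustration
(foo_trace_init() is hypothetical):

	/* The branch is parsed and type-checked, then eliminated by the
	 * compiler when CONFIG_FENCE_TRACE is not set -- unlike #ifdef. */
	if (IS_ENABLED(CONFIG_FENCE_TRACE))
		foo_trace_init();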
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/bug.h>
extern bool static_key_initialized;
struct module;
-#include <linux/atomic.h>
-
#ifdef HAVE_JUMP_LABEL
-static inline int static_key_count(struct static_key *key)
-{
- /*
- * -1 means the first static_key_slow_inc() is in progress.
- * static_key_enabled() must return true, so return 1 here.
- */
- int n = atomic_read(&key->enabled);
- return n >= 0 ? n : 1;
-}
-
#define JUMP_TYPE_FALSE 0UL
#define JUMP_TYPE_TRUE 1UL
#define JUMP_TYPE_MASK 1UL
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
extern void jump_label_apply_nops(struct module *mod);
+extern int static_key_count(struct static_key *key);
+extern void static_key_enable(struct static_key *key);
+extern void static_key_disable(struct static_key *key);
+/*
+ * We should be using ATOMIC_INIT() to initialize .enabled, but
+ * including atomic.h makes inclusion of jump_label.h problematic
+ * in 'low-level' headers. Thus, we initialize .enabled with a
+ * raw value, and have added a BUILD_BUG_ON() in jump_label_init()
+ * to catch any issues; see kernel/jump_label.c.
+ */
#define STATIC_KEY_INIT_TRUE \
- { .enabled = ATOMIC_INIT(1), \
+ { .enabled = { 1 }, \
.entries = (void *)JUMP_TYPE_TRUE }
#define STATIC_KEY_INIT_FALSE \
- { .enabled = ATOMIC_INIT(0), \
+ { .enabled = { 0 }, \
.entries = (void *)JUMP_TYPE_FALSE }
#else /* !HAVE_JUMP_LABEL */
+#include <linux/atomic.h>
+#include <linux/bug.h>
+
static inline int static_key_count(struct static_key *key)
{
return atomic_read(&key->enabled);
return 0;
}
-#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
-#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
-
-#endif /* HAVE_JUMP_LABEL */
-
-#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
-#define jump_label_enabled static_key_enabled
-
static inline void static_key_enable(struct static_key *key)
{
int count = static_key_count(key);
static_key_slow_dec(key);
}
+#define STATIC_KEY_INIT_TRUE { .enabled = ATOMIC_INIT(1) }
+#define STATIC_KEY_INIT_FALSE { .enabled = ATOMIC_INIT(0) }
+
+#endif /* HAVE_JUMP_LABEL */
+
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
/* -------------------------------------------------------------------------- */
/*
#include <linux/types.h>
struct device;
-struct dma_attrs;
struct page;
struct scatterlist;
extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern int
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
extern int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs);
+ enum dma_data_direction dir,
+ unsigned long attrs);
extern void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
mutex_release(&ctx->dep_map, 0, _THIS_IP_);
DEBUG_LOCKS_WARN_ON(ctx->acquired);
- if (!config_enabled(CONFIG_PROVE_LOCKING))
+ if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
/*
* lockdep will normally handle this,
* but fail without anyway
*/
ctx->done_acquire = 1;
- if (!config_enabled(CONFIG_DEBUG_LOCK_ALLOC))
+ if (!IS_ENABLED(CONFIG_DEBUG_LOCK_ALLOC))
/* ensure ww_acquire_fini will still fail if called twice */
ctx->acquired = ~0U;
#endif
* #) Required ops for DMABUF types: attach_dmabuf, detach_dmabuf, map_dmabuf, unmap_dmabuf.
*/
struct vb2_mem_ops {
- void *(*alloc)(struct device *dev, const struct dma_attrs *attrs,
+ void *(*alloc)(struct device *dev, unsigned long attrs,
unsigned long size, enum dma_data_direction dma_dir,
gfp_t gfp_flags);
void (*put)(void *buf_priv);
* @io_modes: supported io methods (see vb2_io_modes enum)
* @dev: device to use for the default allocation context if the driver
* doesn't fill in the @alloc_devs array.
- * @dma_attrs: DMA attributes to use for the DMA. May be NULL.
+ * @dma_attrs: DMA attributes to use for the DMA.
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
unsigned int type;
unsigned int io_modes;
struct device *dev;
- const struct dma_attrs *dma_attrs;
+ unsigned long dma_attrs;
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
#include <media/videobuf2-v4l2.h>
#include <linux/dma-mapping.h>
-struct dma_attrs;
-
static inline dma_addr_t
vb2_dma_contig_plane_dma_addr(struct vb2_buffer *vb, unsigned int plane_no)
{
static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
void *cpu_addr, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
- direction, attrs);
+ direction, dma_attrs);
}
static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
u64 addr, size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
return dma_unmap_single_attrs(dev->dma_device, addr, size,
- direction, attrs);
+ direction, dma_attrs);
}
/**
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
- return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+ return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
+ dma_attrs);
}
static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
struct scatterlist *sg, int nents,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long dma_attrs)
{
- dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+ dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, dma_attrs);
}
/**
* ib_sg_dma_address - Return the DMA address from a scatter/gather entry
extern void
*xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs);
+ unsigned long attrs);
extern void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
+#include <linux/bug.h>
#ifdef HAVE_JUMP_LABEL
static void jump_label_update(struct static_key *key);
+/*
+ * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
+ * The use of 'atomic_read()' requires atomic.h, which is problematic for
+ * some kernel headers such as kernel.h and others. Since static_key_count()
+ * is not used in the branch statements here, as it is in the
+ * !HAVE_JUMP_LABEL case, it's OK to have it be a function. Similarly for
+ * 'static_key_enable()' and 'static_key_disable()', which require bug.h.
+ * This should allow jump_label.h to be included from most/all places
+ * for HAVE_JUMP_LABEL.
+ */
+int static_key_count(struct static_key *key)
+{
+ /*
+ * -1 means the first static_key_slow_inc() is in progress.
+ * static_key_enabled() must return true, so return 1 here.
+ */
+ int n = atomic_read(&key->enabled);
+
+ return n >= 0 ? n : 1;
+}
+EXPORT_SYMBOL_GPL(static_key_count);
+
+void static_key_enable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (!count)
+ static_key_slow_inc(key);
+}
+EXPORT_SYMBOL_GPL(static_key_enable);
+
+void static_key_disable(struct static_key *key)
+{
+ int count = static_key_count(key);
+
+ WARN_ON_ONCE(count < 0 || count > 1);
+
+ if (count)
+ static_key_slow_dec(key);
+}
+EXPORT_SYMBOL_GPL(static_key_disable);
+
void static_key_slow_inc(struct static_key *key)
{
int v, v1;
struct static_key *key = NULL;
struct jump_entry *iter;
+ /*
+	 * Since we are initializing the static_key.enabled field with
+	 * the 'raw' int values (to avoid pulling in atomic.h) in
+ * jump_label.h, let's make sure that is safe. There are only two
+ * cases to check since we initialize to 0 or 1.
+ */
+ BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
+ BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
+
jump_label_lock();
jump_label_sort_entries(iter_start, iter_stop);
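For reference, a sketch of how the now out-of-line helpers above might be
used by a caller (my_feature and my_feature_set() are illustrative):

	static struct static_key my_feature = STATIC_KEY_INIT_FALSE;

	static void my_feature_set(bool on)
	{
		if (on)
			static_key_enable(&my_feature);   /* count 0 -> 1 */
		else
			static_key_disable(&my_feature);  /* count 1 -> 0 */
	}

A hot-path test such as static_key_false(&my_feature) is unaffected; only
the enable/disable plumbing moves out of line.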
return -EINVAL;
if (unlikely(data & PTRACE_O_SUSPEND_SECCOMP)) {
- if (!config_enabled(CONFIG_CHECKPOINT_RESTORE) ||
- !config_enabled(CONFIG_SECCOMP))
+ if (!IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) ||
+ !IS_ENABLED(CONFIG_SECCOMP))
return -EINVAL;
if (!capable(CAP_SYS_ADMIN))
{
struct seccomp_filter *sfilter;
int ret;
- const bool save_orig = config_enabled(CONFIG_CHECKPOINT_RESTORE);
+ const bool save_orig = IS_ENABLED(CONFIG_CHECKPOINT_RESTORE);
if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
return ERR_PTR(-EINVAL);
{
int mode = current->seccomp.mode;
- if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
+ if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return;
int mode = current->seccomp.mode;
int this_syscall;
- if (config_enabled(CONFIG_CHECKPOINT_RESTORE) &&
+ if (IS_ENABLED(CONFIG_CHECKPOINT_RESTORE) &&
unlikely(current->ptrace & PT_SUSPEND_SECCOMP))
return 0;
static void *dma_noop_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
void *ret;
static void dma_noop_free(struct device *dev, size_t size,
void *cpu_addr, dma_addr_t dma_addr,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
free_pages((unsigned long)cpu_addr, get_order(size));
}
static dma_addr_t dma_noop_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
return page_to_phys(page) + offset;
}
static int dma_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir,
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
newflags = (dp->flags & mask) | flags;
if (newflags == dp->flags)
continue;
+#ifdef HAVE_JUMP_LABEL
+ if (dp->flags & _DPRINTK_FLAGS_PRINT) {
+ if (!(flags & _DPRINTK_FLAGS_PRINT))
+ static_branch_disable(&dp->key.dd_key_true);
+ } else if (flags & _DPRINTK_FLAGS_PRINT)
+ static_branch_enable(&dp->key.dd_key_true);
+#endif
dp->flags = newflags;
vpr_info("changed %s:%d [%s]%s =%s\n",
trim_prefix(dp->filename), dp->lineno,
dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
phys_addr_t map, phys = page_to_phys(page) + offset;
dma_addr_t dev_addr = phys_to_dma(dev, phys);
void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unmap_single(hwdev, dev_addr, size, dir);
}
*/
int
swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir, struct dma_attrs *attrs)
+ enum dma_data_direction dir, unsigned long attrs)
{
struct scatterlist *sg;
int i;
swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
- return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+ return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, 0);
}
EXPORT_SYMBOL(swiotlb_map_sg);
*/
void
swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir, struct dma_attrs *attrs)
+ int nelems, enum dma_data_direction dir,
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
enum dma_data_direction dir)
{
- return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+ return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, 0);
}
EXPORT_SYMBOL(swiotlb_unmap_sg);
ASSERT_RTNL();
- if (!config_enabled(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
+ if (!IS_ENABLED(CONFIG_CFG80211_REG_RELAX_NO_IR) ||
!(wiphy->regulatory_flags & REGULATORY_ENABLE_RELAX_NO_IR))
return false;
ret = register_jprobe(&my_jprobe);
if (ret < 0) {
- printk(KERN_INFO "register_jprobe failed, returned %d\n", ret);
+ pr_err("register_jprobe failed, returned %d\n", ret);
return -1;
}
- printk(KERN_INFO "Planted jprobe at %p, handler addr %p\n",
+ pr_info("Planted jprobe at %p, handler addr %p\n",
my_jprobe.kp.addr, my_jprobe.entry);
return 0;
}
static void __exit jprobe_exit(void)
{
unregister_jprobe(&my_jprobe);
- printk(KERN_INFO "jprobe at %p unregistered\n", my_jprobe.kp.addr);
+ pr_info("jprobe at %p unregistered\n", my_jprobe.kp.addr);
}
module_init(jprobe_init)
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
#ifdef CONFIG_X86
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, ip = %lx,"
- " flags = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, ip = %lx, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->ip, regs->flags);
#endif
#ifdef CONFIG_PPC
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, nip = 0x%lx,"
- " msr = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, nip = 0x%lx, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->nip, regs->msr);
#endif
#ifdef CONFIG_MIPS
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx,"
- " status = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, epc = 0x%lx, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_epc, regs->cp0_status);
#endif
#ifdef CONFIG_TILEGX
- printk(KERN_INFO "<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx,"
- " ex1 = 0x%lx\n",
+ pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx, ex1 = 0x%lx\n",
p->symbol_name, p->addr, regs->pc, regs->ex1);
#endif
#ifdef CONFIG_ARM64
unsigned long flags)
{
#ifdef CONFIG_X86
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, flags = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, flags = 0x%lx\n",
p->symbol_name, p->addr, regs->flags);
#endif
#ifdef CONFIG_PPC
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, msr = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, msr = 0x%lx\n",
p->symbol_name, p->addr, regs->msr);
#endif
#ifdef CONFIG_MIPS
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, status = 0x%lx\n",
p->symbol_name, p->addr, regs->cp0_status);
#endif
#ifdef CONFIG_TILEGX
- printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
+ pr_info("<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
p->symbol_name, p->addr, regs->ex1);
#endif
#ifdef CONFIG_ARM64
*/
static int handler_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
{
- printk(KERN_INFO "fault_handler: p->addr = 0x%p, trap #%dn",
- p->addr, trapnr);
+	pr_info("fault_handler: p->addr = 0x%p, trap #%d\n", p->addr, trapnr);
/* Return 0 because we don't handle the fault. */
return 0;
}
ret = register_kprobe(&kp);
if (ret < 0) {
- printk(KERN_INFO "register_kprobe failed, returned %d\n", ret);
+ pr_err("register_kprobe failed, returned %d\n", ret);
return ret;
}
- printk(KERN_INFO "Planted kprobe at %p\n", kp.addr);
+ pr_info("Planted kprobe at %p\n", kp.addr);
return 0;
}
static void __exit kprobe_exit(void)
{
unregister_kprobe(&kp);
- printk(KERN_INFO "kprobe at %p unregistered\n", kp.addr);
+ pr_info("kprobe at %p unregistered\n", kp.addr);
}
module_init(kprobe_init)
*/
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
- int retval = regs_return_value(regs);
+ unsigned long retval = regs_return_value(regs);
struct my_data *data = (struct my_data *)ri->data;
s64 delta;
ktime_t now;
now = ktime_get();
delta = ktime_to_ns(ktime_sub(now, data->entry_stamp));
- printk(KERN_INFO "%s returned %d and took %lld ns to execute\n",
+ pr_info("%s returned %lu and took %lld ns to execute\n",
func_name, retval, (long long)delta);
return 0;
}
my_kretprobe.kp.symbol_name = func_name;
ret = register_kretprobe(&my_kretprobe);
if (ret < 0) {
- printk(KERN_INFO "register_kretprobe failed, returned %d\n",
- ret);
+ pr_err("register_kretprobe failed, returned %d\n", ret);
return -1;
}
- printk(KERN_INFO "Planted return probe at %s: %p\n",
+ pr_info("Planted return probe at %s: %p\n",
my_kretprobe.kp.symbol_name, my_kretprobe.kp.addr);
return 0;
}
static void __exit kretprobe_exit(void)
{
unregister_kretprobe(&my_kretprobe);
- printk(KERN_INFO "kretprobe at %p unregistered\n",
- my_kretprobe.kp.addr);
+ pr_info("kretprobe at %p unregistered\n", my_kretprobe.kp.addr);
/* nmissed > 0 suggests that maxactive was set too low. */
- printk(KERN_INFO "Missed probing %d instances of %s\n",
+ pr_info("Missed probing %d instances of %s\n",
my_kretprobe.nmissed, my_kretprobe.kp.symbol_name);
}