* @lock: lock for hardware operations involving this dart
* @pgsize: pagesize supported by this DART
* @supports_bypass: indicates if this DART supports bypass mode
- * @force_bypass: force bypass mode due to pagesize mismatch?
* @sid2group: maps stream ids to iommu_groups
* @iommu: iommu core device
*/
u32 pgsize;
u32 supports_bypass : 1;
- u32 force_bypass : 1;
struct iommu_group *sid2group[DART_MAX_STREAMS];
struct iommu_device iommu;
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
struct apple_dart_domain *dart_domain = to_dart_domain(domain);
- if (cfg->stream_maps[0].dart->force_bypass &&
- domain->type != IOMMU_DOMAIN_IDENTITY)
- return -EINVAL;
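+ /*
+ * A pagesize mismatch no longer forces bypass mode here: the
+ * dma-iommu layer can now handle an IOMMU granule larger than
+ * PAGE_SIZE directly.
+ */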
if (!cfg->stream_maps[0].dart->supports_bypass &&
domain->type == IOMMU_DOMAIN_IDENTITY)
return -EINVAL;
if (cfg_dart) {
if (cfg_dart->supports_bypass != dart->supports_bypass)
return -EINVAL;
- if (cfg_dart->force_bypass != dart->force_bypass)
- return -EINVAL;
if (cfg_dart->pgsize != dart->pgsize)
return -EINVAL;
}
{
struct apple_dart_master_cfg *cfg = dev_iommu_priv_get(dev);
- if (cfg->stream_maps[0].dart->force_bypass)
- return IOMMU_DOMAIN_IDENTITY;
if (!cfg->stream_maps[0].dart->supports_bypass)
return IOMMU_DOMAIN_DMA;
dart_params[1] = readl(dart->regs + DART_PARAMS2);
dart->pgsize = 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT, dart_params[0]);
dart->supports_bypass = dart_params[1] & DART_PARAMS_BYPASS_SUPPORT;
- dart->force_bypass = dart->pgsize > PAGE_SIZE;
ret = request_irq(dart->irq, apple_dart_irq, IRQF_SHARED,
"apple-dart fault handler", dart);
dev_info(
&pdev->dev,
- "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
- dart->pgsize, dart->supports_bypass, dart->force_bypass);
+ "DART [pagesize %x, bypass support: %d] initialized\n",
+ dart->pgsize, dart->supports_bypass);
return 0;
err_sysfs_remove:
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
+#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/pfn.h>
#include <linux/scatterlist.h>
{
struct page **pages;
unsigned int i = 0, nid = dev_to_node(dev);
+ unsigned int j;
+ unsigned long min_order = __ffs(order_mask);
+ unsigned int min_order_size = 1U << min_order;
+
order_mask &= (2U << MAX_ORDER) - 1;
if (!order_mask)
split_page(page, order);
break;
}
- if (!page) {
- __iommu_dma_free_pages(pages, i);
- return NULL;
- }
+
+ /*
+ * If we have no valid page here we might be trying to allocate
+ * the last block consisting of 1<<min_order pages (to guarantee
+ * alignment) but actually need fewer pages than that.
+ * In that case we just try to allocate the entire block and
+ * directly free the spillover pages again.
+ */
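+ /*
+ * e.g. with a 16K granule on a 4K-page kernel, order_mask has
+ * only bit 2 set: if three pages remain, the loop above finds
+ * no usable order, so we allocate the full four-page block and
+ * immediately free the fourth page again.
+ */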
+ if (!page && !order_mask && count < min_order_size) {
+ page = alloc_pages_node(nid, gfp, min_order);
+ if (!page)
+ goto free_pages;
+ split_page(page, min_order);
+
+ for (j = count; j < min_order_size; ++j)
+ __free_page(page + j);
+
+ order_size = count;
+ }
+
+ if (!page)
+ goto free_pages;
+
count -= order_size;
while (order_size--)
pages[i++] = page++;
}
return pages;
+
+free_pages:
+ __iommu_dma_free_pages(pages, i);
+ return NULL;
}
/*
bool coherent = dev_is_dma_coherent(dev);
int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
+ struct sg_append_table sgt_append = {};
+ struct scatterlist *last_sg;
struct page **pages;
dma_addr_t iova;
+ phys_addr_t orig_s_phys;
+ size_t orig_s_len, orig_s_off, s_iova_off, iova_size;
if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
iommu_deferred_attach(dev, domain))
return NULL;
min_size = alloc_sizes & -alloc_sizes;
- if (min_size < PAGE_SIZE) {
+ if (iovad->granule > PAGE_SIZE) {
+ if (size < iovad->granule) {
+ /*
+ * An allocation smaller than the granule ends up inside a
+ * single IOMMU page, so it must come from one physically
+ * contiguous block.
+ */
+ min_size = ALIGN(size, PAGE_SIZE * (1U << get_order(size)));
+ alloc_sizes = min_size;
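+ /*
+ * e.g. on a 4K-page kernel with a 16K granule, a 6K
+ * request yields min_size = alloc_sizes = 8K: one
+ * physically contiguous two-page block.
+ */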
+ }
+
+ size = PAGE_ALIGN(size);
+ } else if (min_size < PAGE_SIZE) {
min_size = PAGE_SIZE;
alloc_sizes |= PAGE_SIZE;
} else {
if (!pages)
return NULL;
- size = iova_align(iovad, size);
- iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
+ iova_size = iova_align(iovad, size);
+ iova = iommu_dma_alloc_iova(domain, iova_size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
- if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
+ /* append_table is only used to get a pointer to the last entry */
+ if (sg_alloc_append_table_from_pages(&sgt_append, pages, count, 0,
+ iova_size, UINT_MAX, 0, GFP_KERNEL))
goto out_free_iova;
+ memcpy(sgt, &sgt_append.sgt, sizeof(*sgt));
+ last_sg = sgt_append.prv;
if (!(ioprot & IOMMU_CACHE)) {
struct scatterlist *sg;
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
arch_dma_prep_coherent(sg_page(sg), sg->length);
}
+ if (iovad->granule > PAGE_SIZE) {
+ if (size < iovad->granule) {
+ /*
+ * We only have a single sg list entry here and it is
+ * likely not aligned to iovad->granule. Adjust the
+ * entry to represent the encapsulating IOMMU page,
+ * then restore everything to its original values
+ * after mapping, similar to the impedance matching
+ * done in iommu_dma_map_sg.
+ */
+ orig_s_phys = sg_phys(sgt->sgl);
+ orig_s_len = sgt->sgl->length;
+ orig_s_off = sgt->sgl->offset;
+ s_iova_off = iova_offset(iovad, orig_s_phys);
+
+ sg_set_page(sgt->sgl,
+ pfn_to_page(PHYS_PFN(orig_s_phys - s_iova_off)),
+ iova_align(iovad, orig_s_len + s_iova_off),
+ sgt->sgl->offset & ~s_iova_off);
+ } else {
+ /*
+ * Convince iommu_map_sg_atomic to map the last block
+ * even though it may be too small.
+ */
+ orig_s_len = last_sg->length;
+ last_sg->length = iova_align(iovad, last_sg->length);
+ }
+ }
if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
- < size)
+ < iova_size)
goto out_free_sg;
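+ /*
+ * Undo the earlier adjustments: for a sub-granule allocation
+ * the buffer starts s_iova_off bytes into the mapped IOMMU
+ * page, so advance iova to the buffer's real device address.
+ */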
+ if (iovad->granule > PAGE_SIZE) {
+ if (size < iovad->granule) {
+ sg_set_page(sgt->sgl,
+ pfn_to_page(PHYS_PFN(orig_s_phys)),
+ orig_s_len, orig_s_off);
+
+ iova += s_iova_off;
+ } else {
+ last_sg->length = orig_s_len;
+ }
+ }
sgt->sgl->dma_address = iova;
- sgt->sgl->dma_length = size;
+ sgt->sgl->dma_length = iova_size;
return pages;
out_free_sg:
sg_free_table(sgt);
out_free_iova:
- iommu_dma_free_iova(cookie, iova, size, NULL);
+ iommu_dma_free_iova(cookie, iova, iova_size, NULL);
out_free_pages:
__iommu_dma_free_pages(pages, count);
return NULL;
unsigned int s_length = sg_dma_len(s);
unsigned int s_iova_len = s->length;
- s->offset += s_iova_off;
- s->length = s_length;
- sg_dma_address(s) = DMA_MAPPING_ERROR;
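+ /*
+ * s_iova_off may exceed PAGE_SIZE when the granule is
+ * larger than the CPU page size, so move the page
+ * pointer instead of growing s->offset past a page.
+ */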
+ sg_set_page(s,
+ pfn_to_page(PHYS_PFN(sg_phys(s) + s_iova_off)),
+ s_length, s_iova_off & ~PAGE_MASK);
+ sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
/*
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
struct scatterlist *s;
+ phys_addr_t orig_paddr;
int i;
for_each_sg(sg, s, nents, i) {
- if (sg_dma_address(s) != DMA_MAPPING_ERROR)
- s->offset += sg_dma_address(s);
- if (sg_dma_len(s))
- s->length = sg_dma_len(s);
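+ /*
+ * Undo the impedance matching from iommu_dma_map_sg: the
+ * stashed DMA address is the offset of the original
+ * physical address within the granule-aligned entry.
+ */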
+ if (sg_dma_len(s)) {
+ orig_paddr = sg_phys(s) + sg_dma_address(s);
+ sg_set_page(s,
+ pfn_to_page(PHYS_PFN(orig_paddr)),
+ sg_dma_len(s),
+ sg_dma_address(s) & ~PAGE_MASK);
+ }
sg_dma_address(s) = DMA_MAPPING_ERROR;
sg_dma_len(s) = 0;
}
* stashing the unaligned parts in the as-yet-unused DMA fields.
*/
for_each_sg(sg, s, nents, i) {
- size_t s_iova_off = iova_offset(iovad, s->offset);
+ phys_addr_t s_phys = sg_phys(s);
+ size_t s_iova_off = iova_offset(iovad, s_phys);
size_t s_length = s->length;
size_t pad_len = (mask - iova_len + 1) & mask;
sg_dma_address(s) = s_iova_off;
sg_dma_len(s) = s_length;
- s->offset -= s_iova_off;
s_length = iova_align(iovad, s_length + s_iova_off);
- s->length = s_length;
-
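+ /*
+ * s_iova_off can exceed s->offset once the granule is
+ * larger than PAGE_SIZE, so adjust the page pointer
+ * rather than letting s->offset underflow.
+ */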
+ sg_set_page(s, pfn_to_page(PHYS_PFN(s_phys - s_iova_off)),
+ s_length, s->offset & ~s_iova_off);
/*
* Due to the alignment of our single IOVA allocation, we can
* depend on these assumptions about the segment boundary mask:
void *cpu_addr, dma_addr_t dma_addr, size_t size,
unsigned long attrs)
{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
struct page *page;
int ret;
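+ /* not supported when the IOMMU granule exceeds the CPU page size */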
+ if (iovad->granule > PAGE_SIZE)
+ return -ENXIO;
+
if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
struct page **pages = dma_common_find_pages(cpu_addr);