dma-mapping-fast: Add support for DMA_ATTR_FORCE_CONTIGUOUS

Allow devices with an IOMMU to allocate from a CMA area when the
DMA_ATTR_FORCE_CONTIGUOUS attribute is set.
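
For reference, a driver would request such an allocation through the
standard DMA API. The sketch below is illustrative only and not part of
this patch; the helper names, device pointer, and buffer size are
hypothetical, assuming a client device that sits behind the fast SMMU
mapper:

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/sizes.h>

/* Illustrative sketch: 'dev' and the 1 MiB size are example values. */
static void *example_alloc_contiguous(struct device *dev, dma_addr_t *iova)
{
	/*
	 * With this change, a device behind the fast SMMU mapper is served
	 * from CMA when DMA_ATTR_FORCE_CONTIGUOUS is passed; the physically
	 * contiguous buffer is still mapped in the IOMMU and *iova holds
	 * the DMA address to hand to the hardware.
	 */
	return dma_alloc_attrs(dev, SZ_1M, iova, GFP_KERNEL,
			       DMA_ATTR_FORCE_CONTIGUOUS);
}

/* Free with the same attribute so the CMA pages are released. */
static void example_free_contiguous(struct device *dev, void *cpu_addr,
				    dma_addr_t iova)
{
	dma_free_attrs(dev, SZ_1M, cpu_addr, iova, DMA_ATTR_FORCE_CONTIGUOUS);
}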

Change-Id: Ifd8575d1cea57cbc13c602f6edb6ab389844b35d
Signed-off-by: Patrick Daly <pdaly@codeaurora.org>
[isaacm@codeaurora.org: resolved merge conflicts]
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>

@@ -550,7 +550,6 @@ static void *fast_smmu_alloc_atomic(struct dma_fast_smmu_mapping *mapping,
dma_addr_t dma_addr;
int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
size = ALIGN(size, FAST_PAGE_SIZE);
if (coherent) {
page = alloc_pages(gfp, get_order(size));
addr = page ? page_address(page) : NULL;
@@ -613,6 +612,54 @@ static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
return pages;
}

static void *__fast_smmu_alloc_contiguous(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
bool is_coherent = is_dma_coherent(dev, attrs);
int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, is_coherent, attrs);
pgprot_t remap_prot = __get_dma_pgprot(attrs, PAGE_KERNEL, is_coherent);
struct page *page;
dma_addr_t iova;
unsigned long flags;
void *coherent_addr;
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size), gfp & __GFP_NOWARN);
if (!page)
return NULL;
spin_lock_irqsave(&mapping->lock, flags);
iova = __fast_smmu_alloc_iova(mapping, attrs, size);
spin_unlock_irqrestore(&mapping->lock, flags);
if (iova == DMA_ERROR_CODE)
goto release_page;
if (av8l_fast_map_public(mapping->pgtbl_ops, iova, page_to_phys(page),
size, prot))
goto release_iova;
coherent_addr = dma_common_contiguous_remap(page, size, remap_prot,
__fast_smmu_alloc_contiguous);
if (!coherent_addr)
goto release_mapping;
if (!is_coherent)
__dma_flush_area(page_to_virt(page), size);
*handle = iova;
return coherent_addr;
release_mapping:
av8l_fast_unmap_public(mapping->pgtbl_ops, iova, size);
release_iova:
__fast_smmu_free_iova(mapping, iova, size);
release_page:
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
return NULL;
}

static void *fast_smmu_alloc(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp,
unsigned long attrs)
@@ -640,11 +687,14 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
}
*handle = DMA_ERROR_CODE;
size = ALIGN(size, SZ_4K);
if (!gfpflags_allow_blocking(gfp)) {
if (!gfpflags_allow_blocking(gfp))
return fast_smmu_alloc_atomic(mapping, size, gfp, attrs, handle,
is_coherent);
}
else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
return __fast_smmu_alloc_contiguous(dev, size, handle, gfp,
attrs);
pages = __fast_smmu_alloc_pages(count, gfp);
if (!pages) {
@@ -652,7 +702,6 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
return NULL;
}
size = ALIGN(size, SZ_4K);
if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
dev_err(dev, "no sg tablen\n");
goto out_free_pages;
@@ -740,6 +789,11 @@ static void fast_smmu_free(struct device *dev, size_t size,
dma_common_free_remap(cpu_addr, size);
__fast_smmu_free_pages(pages, size >> FAST_PAGE_SHIFT);
} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
struct page *page = vmalloc_to_page(cpu_addr);
dma_common_free_remap(cpu_addr, size);
dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
} else if (!is_vmalloc_addr(cpu_addr)) {
__free_pages(virt_to_page(cpu_addr), get_order(size));
} else if (fast_dma_in_atomic_pool(cpu_addr, size)) {
@@ -780,6 +834,8 @@ static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
if (area && area->pages)
return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
attrs);
else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
pfn = vmalloc_to_pfn(cpu_addr);
else if (!is_vmalloc_addr(cpu_addr))
pfn = page_to_pfn(virt_to_page(cpu_addr));
else if (fast_dma_in_atomic_pool(cpu_addr, size))
@@ -805,6 +861,8 @@ static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
if (area && area->pages)
return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0,
size, GFP_KERNEL);
else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
page = vmalloc_to_page(cpu_addr);
else if (!is_vmalloc_addr(cpu_addr))
page = virt_to_page(cpu_addr);
else if (fast_dma_in_atomic_pool(cpu_addr, size))