dma-mapping-fast: Clean up common code
The current derivation of pfns/page structures has a fair amount of
complexity in it, and can be simplified. For example, there need not be
completely separate cases for pages that are all remapped into the
vmalloc space but were allocated differently (i.e. atomic allocations
vs contiguous allocations). So, simplify the handling for those cases,
and leverage iommu_dma_mmap() and iommu_dma_get_sgtable(), as there is
nothing fastmap-specific about the way pages are mapped into userspace
or about how the sg-table is constructed.

Change-Id: Ia5725a93ee84f2c5e7ccc1d6e62f828338e83bd7
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
commit 7d445beba8 (parent f3f78a96c7)
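Background for the simplification described above (an illustrative sketch, not code from this patch): once atomic-pool buffers are dealt with up front, every remaining buffer fastmap hands out is either a page-array remap, a remapped contiguous chunk, or a plain linear-mapping address, and the first two can be told apart without knowing which allocator produced them, because dma_common_find_pages() only returns a page array for remaps created through dma_common_pages_remap(). A minimal sketch of that classification, assuming the v5.4-era remap helpers; the helper name below is hypothetical:

/* Hypothetical illustration only -- not part of this patch. */
#include <linux/dma-mapping.h>
#include <linux/mm.h>

static struct page *example_classify_buffer(void *cpu_addr,
                                            struct page ***pagesp)
{
        *pagesp = NULL;

        if (is_vmalloc_addr(cpu_addr)) {
                /* A page-array remap reports its backing pages... */
                *pagesp = dma_common_find_pages(cpu_addr);
                if (*pagesp)
                        return NULL;
                /* ...while a remapped contiguous chunk yields its head page. */
                return vmalloc_to_page(cpu_addr);
        }

        /* Not remapped at all: a plain linear-mapping address. */
        return virt_to_page(cpu_addr);
}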
@@ -1102,7 +1102,7 @@ int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                               vma->vm_page_prot);
 }
 
-static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
 {
@@ -46,11 +46,6 @@ static int __init fast_smmu_dma_init(void)
 }
 arch_initcall(fast_smmu_dma_init);
 
-static bool fast_dma_in_atomic_pool(void *start, size_t size)
-{
-        return __dma_in_atomic_pool(fast_atomic_pool, start, size);
-}
-
 static void *fast_dma_alloc_from_pool(size_t size, struct page **ret_page,
                                       gfp_t flags)
 {
@@ -62,11 +57,6 @@ static bool fast_dma_free_from_pool(void *start, size_t size)
         return __dma_free_from_pool(fast_atomic_pool, start, size);
 }
 
-static phys_addr_t fast_dma_get_phys(void *addr)
-{
-        return gen_pool_virt_to_phys(fast_atomic_pool, (unsigned long)addr);
-}
-
 static bool is_dma_coherent(struct device *dev, unsigned long attrs)
 {
         bool is_coherent;
@@ -773,7 +763,8 @@ static void fast_smmu_free(struct device *dev, size_t size,
                            unsigned long attrs)
 {
         struct dma_fast_smmu_mapping *mapping = dev_get_mapping(dev);
-        struct vm_struct *area;
+        struct page **pages = NULL;
+        struct page *page = NULL;
         unsigned long flags;
 
         size = ALIGN(size, FAST_PAGE_SIZE);
@@ -783,98 +774,37 @@ static void fast_smmu_free(struct device *dev, size_t size,
         __fast_smmu_free_iova(mapping, dma_handle, size);
         spin_unlock_irqrestore(&mapping->lock, flags);
 
-        area = find_vm_area(cpu_addr);
-        if (area && area->pages) {
-                struct page **pages = area->pages;
-
-                dma_common_free_remap(cpu_addr, size);
-                __fast_smmu_free_pages(pages, size >> FAST_PAGE_SHIFT);
-        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
-                struct page *page = vmalloc_to_page(cpu_addr);
-
-                dma_common_free_remap(cpu_addr, size);
-                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
-        } else if (!is_vmalloc_addr(cpu_addr)) {
-                __free_pages(virt_to_page(cpu_addr), get_order(size));
-        } else if (fast_dma_in_atomic_pool(cpu_addr, size)) {
-                // Keep remap
-                fast_dma_free_from_pool(cpu_addr, size);
-        }
-}
-
-/* __swiotlb_mmap_pfn is not currently exported. */
-static int fast_smmu_mmap_pfn(struct vm_area_struct *vma, unsigned long pfn,
-                              size_t size)
-{
-        int ret = -ENXIO;
-        unsigned long nr_vma_pages = vma_pages(vma);
-        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        unsigned long off = vma->vm_pgoff;
-
-        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
-                ret = remap_pfn_range(vma, vma->vm_start, pfn + off,
-                                      vma->vm_end - vma->vm_start,
-                                      vma->vm_page_prot);
-        }
-
-        return ret;
+        if (fast_dma_free_from_pool(cpu_addr, size))
+                return;
+
+        if (is_vmalloc_addr(cpu_addr)) {
+                pages = dma_common_find_pages(cpu_addr);
+                if (!pages)
+                        page = vmalloc_to_page(cpu_addr);
+                dma_common_free_remap(cpu_addr, size);
+        } else {
+                page = virt_to_page(cpu_addr);
+        }
+
+        if (pages)
+                __fast_smmu_free_pages(pages, size >> FAST_PAGE_SHIFT);
+
+        if (page)
+                dma_free_contiguous(dev, page, size);
 }
 
 static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                                 void *cpu_addr, dma_addr_t dma_addr,
                                 size_t size, unsigned long attrs)
 {
-        struct vm_struct *area;
-        bool coherent = is_dma_coherent(dev, attrs);
-        unsigned long pfn = 0;
-
-        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-                                             coherent);
-        area = find_vm_area(cpu_addr);
-        if (area && area->pages)
-                return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
-                                      attrs);
-        else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
-                pfn = vmalloc_to_pfn(cpu_addr);
-        else if (!is_vmalloc_addr(cpu_addr))
-                pfn = page_to_pfn(virt_to_page(cpu_addr));
-        else if (fast_dma_in_atomic_pool(cpu_addr, size))
-                pfn = fast_dma_get_phys(cpu_addr) >> PAGE_SHIFT;
-
-
-        if (pfn)
-                return fast_smmu_mmap_pfn(vma, pfn, size);
-
-        return -EINVAL;
+        return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
 
 static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
                                  void *cpu_addr, dma_addr_t dma_addr,
                                  size_t size, unsigned long attrs)
 {
-        unsigned int n_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-        struct vm_struct *area;
-        struct page *page = NULL;
-        int ret = -ENXIO;
-
-        area = find_vm_area(cpu_addr);
-        if (area && area->pages)
-                return sg_alloc_table_from_pages(sgt, area->pages, n_pages, 0,
-                                                 size, GFP_KERNEL);
-        else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS)
-                page = vmalloc_to_page(cpu_addr);
-        else if (!is_vmalloc_addr(cpu_addr))
-                page = virt_to_page(cpu_addr);
-        else if (fast_dma_in_atomic_pool(cpu_addr, size))
-                page = phys_to_page(fast_dma_get_phys(cpu_addr));
-
-        if (page) {
-                ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
-                if (!ret)
-                        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
-        }
-
-        return ret;
+        return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
 }
 
 static dma_addr_t fast_smmu_dma_map_resource(
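With the per-case handling gone, the mmap and get_sgtable ops become straight delegations to iommu_dma_mmap() and iommu_dma_get_sgtable() (exported by the header hunk below). The delegation is safe because a generic get_sgtable has to walk exactly the same three cases the deleted fastmap code handled by hand. A hedged sketch of that logic follows; the function name is hypothetical, and the real implementation lives in the dma-iommu code and may differ in detail:

/* Hypothetical illustration only -- not the actual dma-iommu implementation. */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int example_generic_get_sgtable(struct sg_table *sgt, void *cpu_addr,
                                       size_t size)
{
        struct page *page;
        int ret;

        if (is_vmalloc_addr(cpu_addr)) {
                struct page **pages = dma_common_find_pages(cpu_addr);

                /* Page-array remap: one sg entry per backing page. */
                if (pages)
                        return sg_alloc_table_from_pages(sgt, pages,
                                        PAGE_ALIGN(size) >> PAGE_SHIFT,
                                        0, size, GFP_KERNEL);
                page = vmalloc_to_page(cpu_addr);
        } else {
                page = virt_to_page(cpu_addr);
        }

        /* Physically contiguous buffer: a single sg entry suffices. */
        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}

The mmap path follows the same shape: map the page array when one exists, otherwise remap the single contiguous region by pfn.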
@@ -30,6 +30,9 @@ void iommu_dma_invalidate_sg(struct scatterlist *sg, int nents);
 int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs);
+int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
+               void *cpu_addr, dma_addr_t dma_addr, size_t size,
+               unsigned long attrs);
 
 /* Setup call for arch DMA mapping code */
 void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size);
@@ -677,7 +677,6 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 void dma_common_free_remap(void *cpu_addr, size_t size);
 
 struct gen_pool *__init __dma_atomic_pool_init(void);
-bool __dma_in_atomic_pool(struct gen_pool *pool, void *start, size_t size);
 bool dma_in_atomic_pool(void *start, size_t size);
 void *__dma_alloc_from_pool(struct gen_pool *pool, size_t size,
                             struct page **ret_page, gfp_t flags);
@@ -186,7 +186,8 @@ static int __init dma_atomic_pool_init(void)
 }
 postcore_initcall(dma_atomic_pool_init);
 
-bool __dma_in_atomic_pool(struct gen_pool *pool, void *start, size_t size)
+static bool __dma_in_atomic_pool(struct gen_pool *pool, void *start,
+                                 size_t size)
 {
         if (unlikely(!pool))
                 return false;
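The hunk above can make __dma_in_atomic_pool() static again because fastmap no longer needs to ask that question separately: the rewritten free path simply tries fast_dma_free_from_pool() first, and a gen_pool-backed try-free can report by itself whether it owned the buffer. A minimal sketch of that shape, assuming the v5.4-era genalloc API (addr_in_gen_pool()); the helper name is hypothetical:

/* Hypothetical illustration only -- not code from this patch. */
#include <linux/genalloc.h>
#include <linux/kernel.h>

static bool example_try_pool_free(struct gen_pool *pool, void *start,
                                  size_t size)
{
        if (unlikely(!pool))
                return false;

        /* Not a pool allocation: tell the caller to free it another way. */
        if (!addr_in_gen_pool(pool, (unsigned long)start, size))
                return false;

        gen_pool_free(pool, (unsigned long)start, size);
        return true;
}

This is the same pattern the rewritten fast_smmu_free() relies on in the large hunk above.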