7d445beba8
The current derivation of pfns/page structures has a fair amount of complexity and can be simplified. For example, there is no need for completely separate cases for pages that are all remapped into the vmalloc space but were allocated differently (i.e. atomic allocations vs contiguous allocations). So, simplify the handling for those, and leverage iommu_dma_mmap() and iommu_dma_get_sgtable(), as there is nothing fastmap-specific about the way pages are mapped into userspace and the way an sg-table is constructed, respectively.

Change-Id: Ia5725a93ee84f2c5e7ccc1d6e62f828338e83bd7
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
299 lines
6.8 KiB
C
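For context on the sg-table point in the commit message: once every remapped buffer exposes its backing pages through dma_common_find_pages() (defined in the file below), constructing an sg-table needs nothing fastmap-specific. The following is only an illustrative sketch of that idea, not the actual iommu_dma_get_sgtable() implementation; the helper name is hypothetical and <linux/scatterlist.h> is assumed to be available.

/*
 * Illustrative sketch only (hypothetical helper): build an sg-table for a
 * buffer that was remapped with VM_DMA_COHERENT, using the generic page
 * array stored in its vm_struct.
 */
static int example_get_sgtable(struct sg_table *sgt, void *cpu_addr,
			       size_t size, gfp_t gfp)
{
	struct page **pages = dma_common_find_pages(cpu_addr);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages,
					 PAGE_ALIGN(size) >> PAGE_SHIFT,
					 0, size, gfp);
}
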
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>
#include <linux/dma-contiguous.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

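/*
 * Return the pages array stored by dma_common_pages_remap() for the vmalloc
 * area backing cpu_addr, or NULL if cpu_addr is not a VM_DMA_COHERENT
 * mapping.
 */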
struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT)
		return NULL;
	return area->pages;
}

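/*
 * Common helper: reserve a VM_DMA_COHERENT vmalloc area of the given size
 * and map the pages array into it with the requested pgprot.
 */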
static struct vm_struct *__dma_common_pages_remap(struct page **pages,
			size_t size, pgprot_t prot, const void *caller)
{
	struct vm_struct *area;

	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
	if (!area)
		return NULL;

	if (map_vm_area(area, prot, pages)) {
		vunmap(area->addr);
		return NULL;
	}

	return area;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			 pgprot_t prot, const void *caller)
{
	struct vm_struct *area;

	area = __dma_common_pages_remap(pages, size, prot, caller);
	if (!area)
		return NULL;

	area->pages = pages;

	return area->addr;
}

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			pgprot_t prot, const void *caller)
{
	int i;
	struct page **pages;
	struct vm_struct *area;

	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		pages[i] = nth_page(page, i);

	area = __dma_common_pages_remap(pages, size, prot, caller);

	kfree(pages);

	if (!area)
		return NULL;
	return area->addr;
}

/*
 * Unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}

#ifdef CONFIG_DMA_DIRECT_REMAP
static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

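/*
 * Pick the most restrictive zone available for the atomic pool, so the pool
 * can also satisfy allocations for devices with narrow DMA masks.
 */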
static gfp_t dma_atomic_pool_gfp(void)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		return GFP_DMA;
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		return GFP_DMA32;
	return GFP_KERNEL;
}

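/*
 * Allocate the backing pages (from CMA if available), remap them as a
 * coherent vmalloc area, and hand the region to a genpool so it can be
 * carved up by non-blocking allocations later.
 */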
struct gen_pool *__init __dma_atomic_pool_init(void)
{
	unsigned int pool_size_order = get_order(atomic_pool_size);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	struct gen_pool *pool;
	void *addr;
	int ret;

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, false);
	else
		page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, atomic_pool_size);

	pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool)
		goto free_page;

	addr = dma_common_contiguous_remap(page, atomic_pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto destroy_genpool;

	ret = gen_pool_add_virt(pool, (unsigned long)addr,
				page_to_phys(page), atomic_pool_size, -1);
	if (ret)
		goto remove_mapping;
	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
		atomic_pool_size / 1024);
	return pool;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size);
destroy_genpool:
	gen_pool_destroy(pool);
	pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return ERR_PTR(-ENOMEM);
}

static int __init dma_atomic_pool_init(void)
{
	struct gen_pool *pool = __dma_atomic_pool_init();

	if (!IS_ERR(pool)) {
		atomic_pool = pool;
		return 0;
	}

	return PTR_ERR(pool);
}
postcore_initcall(dma_atomic_pool_init);

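/*
 * The __dma_*_pool() helpers below operate on an explicit gen_pool; the
 * unprefixed wrappers apply them to the default atomic_pool set up above.
 */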
static bool __dma_in_atomic_pool(struct gen_pool *pool, void *start,
				 size_t size)
{
	if (unlikely(!pool))
		return false;

	return addr_in_gen_pool(pool, (unsigned long)start, size);
}

bool dma_in_atomic_pool(void *start, size_t size)
{
	return __dma_in_atomic_pool(atomic_pool, start, size);
}

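/*
 * Carve a zeroed allocation out of the pool. On success the kernel virtual
 * address is returned and *ret_page points at the first backing page, which
 * callers use to derive the DMA address.
 */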
void *__dma_alloc_from_pool(struct gen_pool *pool, size_t size,
			    struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(pool, val);

		*ret_page = pfn_to_page(__phys_to_pfn(phys));
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	return __dma_alloc_from_pool(atomic_pool, size, ret_page, flags);
}

bool __dma_free_from_pool(struct gen_pool *pool, void *start, size_t size)
{
	if (!__dma_in_atomic_pool(pool, start, size))
		return false;
	gen_pool_free(pool, (unsigned long)start, size);
	return true;
}

bool dma_free_from_pool(void *start, size_t size)
{
	return __dma_free_from_pool(atomic_pool, start, size);
}

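/*
 * Non-blocking requests are served from the pre-remapped atomic pool;
 * everything else gets pages from dma-direct, has the kernel alias cleaned,
 * and is remapped with the pgprot chosen by dma_pgprot().
 */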
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flags, unsigned long attrs)
{
	struct page *page = NULL;
	void *ret;

	size = PAGE_ALIGN(size);

	if (!gfpflags_allow_blocking(flags)) {
		ret = dma_alloc_from_pool(size, &page, flags);
		if (!ret)
			return NULL;
		goto done;
	}

	page = __dma_direct_alloc_pages(dev, size, dma_handle, flags, attrs);
	if (!page)
		return NULL;

	/* remove any dirty cache lines on the kernel alias */
	arch_dma_prep_coherent(page, size);

	/* create a coherent mapping */
	ret = dma_common_contiguous_remap(page, size,
			dma_pgprot(dev, PAGE_KERNEL, attrs),
			__builtin_return_address(0));
	if (!ret) {
		__dma_direct_free_pages(dev, size, page);
		return ret;
	}

	memset(ret, 0, size);
done:
	*dma_handle = phys_to_dma(dev, page_to_phys(page));
	return ret;
}

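/*
 * Free either back to the atomic pool or, for remapped dma-direct
 * allocations, tear down the vmalloc alias before releasing the pages.
 */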
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	if (!dma_free_from_pool(vaddr, PAGE_ALIGN(size))) {
		phys_addr_t phys = dma_to_phys(dev, dma_handle);
		struct page *page = pfn_to_page(__phys_to_pfn(phys));

		vunmap(vaddr);
		__dma_direct_free_pages(dev, size, page);
	}
}

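/*
 * Both pool and remapped allocations are physically contiguous, so the pfn
 * used for mmap/get_sgtable can be derived directly from the dma address.
 */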
long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
		dma_addr_t dma_addr)
{
	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
}
#endif /* CONFIG_DMA_DIRECT_REMAP */