dma-mapping-fast: Do not clobber existing atomic pool
The generic atomic pool is initialized at the postcore initcall level. However, the same init function is called again at a later initcall level to set up the atomic pool for fastmap, and that second call clobbers the generic pool: fastmap and non-fastmap allocations then end up sharing a single atomic pool. Fix this by adding support in the atomic pool framework for creating and using multiple distinct atomic pools, and by creating a private atomic pool for fastmap.

Change-Id: I934774f44cc25ff66157be7920895618cf76b812
Signed-off-by: Isaac J. Manjarres <isaacm@codeaurora.org>
parent 2cfd894384
commit 3a1bd738ef
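For orientation, here is a minimal sketch of how a subsystem would use the split API this change introduces (__dma_atomic_pool_init(), __dma_in_atomic_pool(), __dma_alloc_from_pool(), __dma_free_from_pool()) to keep a private atomic pool instead of reusing the generic one. The subsystem name and wrapper functions below are hypothetical and exist only for illustration; the fastmap code in the diff follows this same pattern.

#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/err.h>
#include <linux/init.h>

/* Hypothetical subsystem-private pool, mirroring fast_atomic_pool below. */
static struct gen_pool *my_atomic_pool __ro_after_init;

static int __init my_subsys_dma_init(void)
{
	/* Create a new pool; the generic atomic_pool is left untouched. */
	struct gen_pool *pool = __dma_atomic_pool_init();

	if (IS_ERR(pool))
		return PTR_ERR(pool);

	my_atomic_pool = pool;
	return 0;
}
arch_initcall(my_subsys_dma_init);

static void *my_subsys_alloc_atomic(size_t size, struct page **page, gfp_t gfp)
{
	/* Atomic allocations are served from the private pool only. */
	return __dma_alloc_from_pool(my_atomic_pool, size, page, gfp);
}

static void my_subsys_free_atomic(void *vaddr, size_t size)
{
	/* Frees only addresses that actually came from the private pool. */
	if (!__dma_free_from_pool(my_atomic_pool, vaddr, size))
		pr_warn("%s: %p not from private atomic pool\n", __func__, vaddr);
}

Because each caller holds its own struct gen_pool pointer, a later init no longer replaces the generic atomic_pool, and dma_free_from_pool() and the private wrappers each touch only their own pool.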
@@ -31,12 +31,42 @@ static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
 	return prot;
 }
 
+static struct gen_pool *fast_atomic_pool __ro_after_init;
+
 static int __init fast_smmu_dma_init(void)
 {
-	return dma_atomic_pool_init();
+	struct gen_pool *pool = __dma_atomic_pool_init();
+
+	if (!IS_ERR(pool)) {
+		fast_atomic_pool = pool;
+		return 0;
+	}
+
+	return PTR_ERR(pool);
 }
 arch_initcall(fast_smmu_dma_init);
 
+static bool fast_dma_in_atomic_pool(void *start, size_t size)
+{
+	return __dma_in_atomic_pool(fast_atomic_pool, start, size);
+}
+
+static void *fast_dma_alloc_from_pool(size_t size, struct page **ret_page,
+				      gfp_t flags)
+{
+	return __dma_alloc_from_pool(fast_atomic_pool, size, ret_page, flags);
+}
+
+static bool fast_dma_free_from_pool(void *start, size_t size)
+{
+	return __dma_free_from_pool(fast_atomic_pool, start, size);
+}
+
+static phys_addr_t fast_dma_get_phys(void *addr)
+{
+	return gen_pool_virt_to_phys(fast_atomic_pool, (unsigned long)addr);
+}
+
 static bool is_dma_coherent(struct device *dev, unsigned long attrs)
 {
 	bool is_coherent;
@@ -525,7 +555,7 @@ static void *fast_smmu_alloc_atomic(struct dma_fast_smmu_mapping *mapping,
 		page = alloc_pages(gfp, get_order(size));
 		addr = page ? page_address(page) : NULL;
 	} else
-		addr = dma_alloc_from_pool(size, &page, gfp);
+		addr = fast_dma_alloc_from_pool(size, &page, gfp);
 	if (!addr)
 		return NULL;
 
@@ -552,7 +582,7 @@ out_free_page:
 	if (coherent)
 		__free_pages(page, get_order(size));
 	else
-		dma_free_from_pool(addr, size);
+		fast_dma_free_from_pool(addr, size);
 	return NULL;
 }
 
@@ -701,7 +731,7 @@ static void fast_smmu_free(struct device *dev, size_t size,
 
 	size = ALIGN(size, SZ_4K);
 
-	if (dma_in_atomic_pool(vaddr, size) || !is_vmalloc_addr(vaddr))
+	if (fast_dma_in_atomic_pool(vaddr, size) || !is_vmalloc_addr(vaddr))
 		goto no_remap;
 
 	area = find_vm_area(vaddr);
@@ -715,8 +745,8 @@ no_remap:
 	av8l_fast_unmap_public(mapping->pgtbl_ops, dma_handle, size);
 	__fast_smmu_free_iova(mapping, dma_handle, size);
 	spin_unlock_irqrestore(&mapping->lock, flags);
-	if (dma_in_atomic_pool(vaddr, size))
-		dma_free_from_pool(vaddr, size);
+	if (fast_dma_in_atomic_pool(vaddr, size))
+		fast_dma_free_from_pool(vaddr, size);
 	else if (is_vmalloc_addr(vaddr))
 		__fast_smmu_free_pages(pages, count);
 	else
@@ -732,8 +762,8 @@ static int __vma_remap_range(struct vm_area_struct *vma, void *cpu_addr,
 	unsigned long off = vma->vm_pgoff;
 	unsigned long pfn;
 
-	if (dma_in_atomic_pool(cpu_addr, size))
-		pfn = page_to_pfn(vmalloc_to_page(cpu_addr));
+	if (fast_dma_in_atomic_pool(cpu_addr, size))
+		pfn = fast_dma_get_phys(cpu_addr) >> PAGE_SHIFT;
 	else
 		pfn = page_to_pfn(virt_to_page(cpu_addr));
 
@@ -759,7 +789,8 @@ static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
 					     coherent);
 
-	if (dma_in_atomic_pool(cpu_addr, size) || !is_vmalloc_addr(cpu_addr))
+	if (fast_dma_in_atomic_pool(cpu_addr, size) ||
+	    !is_vmalloc_addr(cpu_addr))
 		return __vma_remap_range(vma, cpu_addr, size);
 
 	area = find_vm_area(cpu_addr);
@@ -793,8 +824,8 @@ static int fast_smmu_get_sgtable(struct device *dev, struct sg_table *sgt,
 				 size, GFP_KERNEL);
 	else if (!is_vmalloc_addr(cpu_addr))
 		page = virt_to_page(cpu_addr);
-	else if (dma_in_atomic_pool(cpu_addr, size))
-		page = vmalloc_to_page(cpu_addr);
+	else if (fast_dma_in_atomic_pool(cpu_addr, size))
+		page = phys_to_page(fast_dma_get_phys(cpu_addr));
 
 	if (page) {
 		ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
@@ -11,6 +11,7 @@
 #include <linux/scatterlist.h>
 #include <linux/bug.h>
 #include <linux/mem_encrypt.h>
+#include <linux/genalloc.h>
 
 /**
  * List of possible attributes associated with a DMA mapping. The semantics
@@ -675,9 +676,13 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 			pgprot_t prot, const void *caller);
 void dma_common_free_remap(void *cpu_addr, size_t size);
 
-int __init dma_atomic_pool_init(void);
+struct gen_pool *__init __dma_atomic_pool_init(void);
+bool __dma_in_atomic_pool(struct gen_pool *pool, void *start, size_t size);
 bool dma_in_atomic_pool(void *start, size_t size);
+void *__dma_alloc_from_pool(struct gen_pool *pool, size_t size,
+			    struct page **ret_page, gfp_t flags);
 void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
+bool __dma_free_from_pool(struct gen_pool *pool, void *start, size_t size);
 bool dma_free_from_pool(void *start, size_t size);
 
 int
@@ -120,11 +120,12 @@ static gfp_t dma_atomic_pool_gfp(void)
 	return GFP_KERNEL;
 }
 
-int __init dma_atomic_pool_init(void)
+struct gen_pool *__init __dma_atomic_pool_init(void)
 {
 	unsigned int pool_size_order = get_order(atomic_pool_size);
 	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
 	struct page *page;
+	struct gen_pool *pool;
 	void *addr;
 	int ret;
 
@@ -138,8 +139,8 @@ int __init dma_atomic_pool_init(void)
 
 	arch_dma_prep_coherent(page, atomic_pool_size);
 
-	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
-	if (!atomic_pool)
+	pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!pool)
 		goto free_page;
 
 	addr = dma_common_contiguous_remap(page, atomic_pool_size,
@@ -148,52 +149,70 @@ int __init dma_atomic_pool_init(void)
 	if (!addr)
 		goto destroy_genpool;
 
-	ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+	ret = gen_pool_add_virt(pool, (unsigned long)addr,
 				page_to_phys(page), atomic_pool_size, -1);
 	if (ret)
 		goto remove_mapping;
-	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align, NULL);
+	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);
 
 	pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
 		atomic_pool_size / 1024);
-	return 0;
+	return pool;
 
 remove_mapping:
 	dma_common_free_remap(addr, atomic_pool_size);
 destroy_genpool:
-	gen_pool_destroy(atomic_pool);
-	atomic_pool = NULL;
+	gen_pool_destroy(pool);
+	pool = NULL;
 free_page:
 	if (!dma_release_from_contiguous(NULL, page, nr_pages))
 		__free_pages(page, pool_size_order);
 out:
 	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
 	       atomic_pool_size / 1024);
-	return -ENOMEM;
+	return ERR_PTR(-ENOMEM);
 }
 
+static int __init dma_atomic_pool_init(void)
+{
+	struct gen_pool *pool = __dma_atomic_pool_init();
+
+	if (!IS_ERR(pool)) {
+		atomic_pool = pool;
+		return 0;
+	}
+
+	return PTR_ERR(pool);
+}
 postcore_initcall(dma_atomic_pool_init);
 
-bool dma_in_atomic_pool(void *start, size_t size)
+bool __dma_in_atomic_pool(struct gen_pool *pool, void *start, size_t size)
 {
-	if (unlikely(!atomic_pool))
+	if (unlikely(!pool))
 		return false;
 
-	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+	return addr_in_gen_pool(pool, (unsigned long)start, size);
 }
 
-void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
+bool dma_in_atomic_pool(void *start, size_t size)
+{
+	return __dma_in_atomic_pool(atomic_pool, start, size);
+}
+
+void *__dma_alloc_from_pool(struct gen_pool *pool, size_t size,
+			    struct page **ret_page, gfp_t flags)
 {
 	unsigned long val;
 	void *ptr = NULL;
 
-	if (!atomic_pool) {
+	if (!pool) {
 		WARN(1, "coherent pool not initialised!\n");
 		return NULL;
 	}
 
-	val = gen_pool_alloc(atomic_pool, size);
+	val = gen_pool_alloc(pool, size);
 	if (val) {
-		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+		phys_addr_t phys = gen_pool_virt_to_phys(pool, val);
 
 		*ret_page = pfn_to_page(__phys_to_pfn(phys));
 		ptr = (void *)val;
@@ -203,12 +222,22 @@ void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
 	return ptr;
 }
 
+void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
+{
+	return __dma_alloc_from_pool(atomic_pool, size, ret_page, flags);
+}
+
+bool __dma_free_from_pool(struct gen_pool *pool, void *start, size_t size)
+{
+	if (!__dma_in_atomic_pool(pool, start, size))
+		return false;
+	gen_pool_free(pool, (unsigned long)start, size);
+	return true;
+}
+
 bool dma_free_from_pool(void *start, size_t size)
 {
-	if (!dma_in_atomic_pool(start, size))
-		return false;
-	gen_pool_free(atomic_pool, (unsigned long)start, size);
-	return true;
+	return __dma_free_from_pool(atomic_pool, start, size);
 }
 
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,