mm: optimize dev_pagemap reference counting around get_dev_pagemap
Change the calling convention so that get_dev_pagemap always consumes the
previous reference instead of doing this using an explicit earlier call to
put_dev_pagemap in the callers. The callers will still need to put the final
reference after finishing the loop over the pages.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Logan Gunthorpe <logang@deltatee.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 0822acb86c
commit 832d7aa051
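For orientation, a minimal sketch (not part of the patch) of what the new calling convention looks like from a caller's side; it mirrors the __gup_device_huge() hunk in mm/gup.c below, with the surrounding details (pfn, addr, end, the per-page work) elided:

	struct dev_pagemap *pgmap = NULL;

	do {
		/*
		 * Consumes the reference on the pgmap passed in: if it still
		 * covers @pfn it is returned as-is, otherwise it is put and a
		 * new pgmap (with a fresh reference) is looked up.
		 */
		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap))
			break;	/* previous reference was already dropped */

		/* ... grab and record the page for this pfn ... */
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);

	if (pgmap)
		put_dev_pagemap(pgmap);	/* put the single final reference */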
kernel/memremap.c
@@ -507,22 +507,23 @@ struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start)
  * @pfn: page frame number to lookup page_map
  * @pgmap: optional known pgmap that already has a reference
  *
- * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the
- * same mapping.
+ * If @pgmap is non-NULL and covers @pfn it will be returned as-is. If @pgmap
+ * is non-NULL but does not cover @pfn the reference to it will be released.
  */
 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 		struct dev_pagemap *pgmap)
 {
-	const struct resource *res = pgmap ? pgmap->res : NULL;
 	resource_size_t phys = PFN_PHYS(pfn);
 
 	/*
-	 * In the cached case we're already holding a live reference so
-	 * we can simply do a blind increment
+	 * In the cached case we're already holding a live reference.
 	 */
-	if (res && phys >= res->start && phys <= res->end) {
-		percpu_ref_get(pgmap->ref);
-		return pgmap;
+	if (pgmap) {
+		const struct resource *res = pgmap ? pgmap->res : NULL;
+
+		if (res && phys >= res->start && phys <= res->end)
+			return pgmap;
+		put_dev_pagemap(pgmap);
 	}
 
 	/* fall back to slow path lookup */
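To make the reference-counting contract above concrete, here is a small standalone userspace model (purely illustrative; every name in it is made up and none of it is kernel code): get_map() reuses the caller's reference when the cached map still covers the pfn, drops it when it does not, and only then falls back to a fresh lookup that takes a new reference.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-in for struct dev_pagemap: a pfn range plus a refcount. */
struct map {
	unsigned long start, end;	/* covered pfns, inclusive */
	int ref;
};

static struct map maps[] = {
	{ .start = 0,   .end = 99,  .ref = 1 },	/* baseline reference */
	{ .start = 100, .end = 199, .ref = 1 },
};

static void put_map(struct map *m)
{
	assert(m->ref > 0);
	m->ref--;
}

/* Slow path: find the map covering @pfn and take a new reference on it. */
static struct map *lookup_map(unsigned long pfn)
{
	for (size_t i = 0; i < sizeof(maps) / sizeof(maps[0]); i++) {
		if (pfn >= maps[i].start && pfn <= maps[i].end) {
			maps[i].ref++;
			return &maps[i];
		}
	}
	return NULL;
}

/* New convention: always consume the reference held on @m. */
static struct map *get_map(unsigned long pfn, struct map *m)
{
	if (m) {
		if (pfn >= m->start && pfn <= m->end)
			return m;	/* cached hit: reuse held reference */
		put_map(m);		/* pfn crossed into another map */
	}
	return lookup_map(pfn);		/* may be NULL; old ref already gone */
}

int main(void)
{
	struct map *m = NULL;

	/* Walk a pfn range that crosses the boundary between the two maps. */
	for (unsigned long pfn = 90; pfn < 110; pfn++) {
		m = get_map(pfn, m);
		assert(m && pfn >= m->start && pfn <= m->end);
	}
	if (m)
		put_map(m);		/* single final put after the loop */

	/* Both refcounts are back at their baseline of 1. */
	printf("refs: %d %d\n", maps[0].ref, maps[1].ref);
	assert(maps[0].ref == 1 && maps[1].ref == 1);
	return 0;
}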
mm/gup.c (7 changed lines)
@@ -1410,7 +1410,6 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 
 		VM_BUG_ON_PAGE(compound_head(page) != head, page);
 
-		put_dev_pagemap(pgmap);
 		SetPageReferenced(page);
 		pages[*nr] = page;
 		(*nr)++;
@@ -1420,6 +1419,8 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
 	ret = 1;
 
 pte_unmap:
+	if (pgmap)
+		put_dev_pagemap(pgmap);
 	pte_unmap(ptem);
 	return ret;
 }
@@ -1459,10 +1460,12 @@ static int __gup_device_huge(unsigned long pfn, unsigned long addr,
 		SetPageReferenced(page);
 		pages[*nr] = page;
 		get_page(page);
-		put_dev_pagemap(pgmap);
 		(*nr)++;
 		pfn++;
 	} while (addr += PAGE_SIZE, addr != end);
+
+	if (pgmap)
+		put_dev_pagemap(pgmap);
 	return 1;
 }
 