msm: kgsl: Add support for enhanced memory accounting

Update the vm_stat counter NR_UNRECLAIMABLE_PAGES to account for the
total memory allocated by the driver, and update the per-process
rss_stat counter MM_UNRECLAIMABLE to account for the memory each
process allocates or imports.

Change-Id: I0a40bf215fedbe421cce8d5fc41e2580fefb6523
Signed-off-by: Puranam V G Tejaswi <pvgtejas@codeaurora.org>
Author: Puranam V G Tejaswi 2021-05-28 12:48:30 +05:30
parent ee8d6eaeda
commit 32ce17873b
3 changed files with 73 additions and 10 deletions
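
For orientation before the diffs: the patch pairs two existing kernel
accounting primitives. mod_node_page_state() adjusts a per-node vm_stat
counter by a page delta, and add_mm_counter() adjusts a per-process
rss_stat counter. A minimal sketch of the pattern, assuming a downstream
kernel that provides CONFIG_MM_STAT_UNRECLAIMABLE_PAGES,
NR_UNRECLAIMABLE_PAGES and MM_UNRECLAIMABLE; the helper name is
hypothetical and not part of the patch:

#include <linux/mm.h>
#include <linux/vmstat.h>

/*
 * Hypothetical helper (not part of the patch): charge nr_pages to both
 * counters on allocation, and pass a negative delta on free. Both
 * deltas are in pages, not bytes.
 */
static void account_unreclaimable(struct page *page, struct mm_struct *mm,
		long nr_pages)
{
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
	/* node-wide total, surfaced through vm_stat */
	mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
			nr_pages);
	/* per-process share, surfaced through the task's rss_stat */
	add_mm_counter(mm, MM_UNRECLAIMABLE, nr_pages);
#endif
}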

@@ -335,6 +335,27 @@ static void kgsl_destroy_ion(struct kgsl_dma_buf_meta *meta)
 }
 #endif
 
+static void kgsl_process_sub_stats(struct kgsl_process_private *priv,
+		unsigned int type, uint64_t size)
+{
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+	struct task_struct *task;
+	struct mm_struct *mm;
+
+	task = get_pid_task(priv->pid, PIDTYPE_PID);
+	if (task) {
+		mm = get_task_mm(task);
+		if (mm) {
+			add_mm_counter(mm, MM_UNRECLAIMABLE,
+					-(size >> PAGE_SHIFT));
+			mmput(mm);
+		}
+		put_task_struct(task);
+	}
+#endif
+	atomic64_sub(size, &priv->stats[type].cur);
+}
+
 void
 kgsl_mem_entry_destroy(struct kref *kref)
 {
@@ -349,7 +370,7 @@ kgsl_mem_entry_destroy(struct kref *kref)
 	/* pull out the memtype before the flags get cleared */
 	memtype = kgsl_memdesc_usermem_type(&entry->memdesc);
 
-	atomic64_sub(entry->memdesc.size, &entry->priv->stats[memtype].cur);
+	kgsl_process_sub_stats(entry->priv, memtype, entry->memdesc.size);
 
 	/* Detach from process list */
 	kgsl_mem_entry_detach_process(entry);
@@ -2753,10 +2774,12 @@ static void kgsl_process_add_stats(struct kgsl_process_private *priv,
 	if (ret > priv->stats[type].max)
 		priv->stats[type].max = ret;
+
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+	add_mm_counter(current->mm, MM_UNRECLAIMABLE, (size >> PAGE_SHIFT));
+#endif
 }
 
 long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
 		unsigned int cmd, void *data)
 {
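
Worth noting about the kgsl.c changes above: the add and sub paths are
deliberately asymmetric. kgsl_process_add_stats() runs in the context of
the allocating process, so it can charge current->mm directly, while
kgsl_mem_entry_destroy() can run from a workqueue or another process's
context after the owner has exited, which is presumably why
kgsl_process_sub_stats() resolves the owning mm through the stored
struct pid with get_pid_task()/get_task_mm() and drops the references
with mmput()/put_task_struct(). If the task or mm is already gone, the
rss_stat update is simply skipped while the driver-internal
stats[type].cur counter is still decremented.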


@@ -450,6 +450,11 @@ done:
 		pcount++;
 	}
 
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+	mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
+				(1 << order));
+#endif
+
 	return pcount;
 
 eagain:
@@ -521,6 +526,11 @@ static void kgsl_pool_free_page(struct page *page)
 	page_order = compound_order(page);
 
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+	mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
+				-(1 << page_order));
+#endif
+
 	if (!kgsl_pool_max_pages ||
 		(kgsl_pool_size_total() < kgsl_pool_max_pages)) {
 		pool = _kgsl_get_pool_from_order(page_order);
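In both pool paths the delta is expressed in pages via the allocation
order, so an order-n page adjusts NR_UNRECLAIMABLE_PAGES by 1 << n, and
pages are only counted while checked out of the pool:
kgsl_pool_free_page() uncharges the page before deciding whether it
returns to the pool or to the system. A hedged sketch of a wrapper that
would keep the two call sites symmetric (the helper is illustrative, not
part of the patch):

/*
 * Illustrative wrapper: positive delta on handout, negative on release.
 * For order-0 pages the delta is exactly one page.
 */
static inline void pool_account(struct page *page, unsigned int order,
		bool handout)
{
#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
	mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
			handout ? (1L << order) : -(1L << order));
#endif
}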


@@ -952,6 +952,11 @@ static void kgsl_contiguous_free(struct kgsl_memdesc *memdesc)
 	atomic_long_sub(memdesc->size, &kgsl_driver.stats.coherent);
 
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+	mod_node_page_state(page_pgdat(phys_to_page(memdesc->physaddr)),
+			NR_UNRECLAIMABLE_PAGES, -(memdesc->size >> PAGE_SHIFT));
+#endif
+
 	_kgsl_contiguous_free(memdesc);
 }
@@ -961,6 +966,7 @@ static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
 	int i;
 	struct scatterlist *sg;
 	int ret = unlock_sgt(memdesc->sgt);
+	int order = get_order(PAGE_SIZE);
 
 	if (ret) {
 		/*
@@ -979,7 +985,11 @@ static void kgsl_free_secure_system_pages(struct kgsl_memdesc *memdesc)
 	for_each_sg(memdesc->sgt->sgl, sg, memdesc->sgt->nents, i) {
 		struct page *page = sg_page(sg);
 
-		__free_pages(page, get_order(PAGE_SIZE));
+		__free_pages(page, order);
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+		mod_node_page_state(page_pgdat(page), NR_UNRECLAIMABLE_PAGES,
+					-(1 << order));
+#endif
 	}
 
 	sg_free_table(memdesc->sgt);
@@ -1032,15 +1042,20 @@ static void kgsl_free_pool_pages(struct kgsl_memdesc *memdesc)
 static void kgsl_free_system_pages(struct kgsl_memdesc *memdesc)
 {
-	int i;
+	int i, order = get_order(PAGE_SIZE);
 
 	kgsl_paged_unmap_kernel(memdesc);
 	WARN_ON(memdesc->hostptr);
 
 	atomic_long_sub(memdesc->size, &kgsl_driver.stats.page_alloc);
 
-	for (i = 0; i < memdesc->page_count; i++)
-		__free_pages(memdesc->pages[i], get_order(PAGE_SIZE));
+	for (i = 0; i < memdesc->page_count; i++) {
+		__free_pages(memdesc->pages[i], order);
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+		mod_node_page_state(page_pgdat(memdesc->pages[i]),
+				NR_UNRECLAIMABLE_PAGES, -(1 << order));
+#endif
+	}
 
 	memdesc->page_count = 0;
 	kvfree(memdesc->pages);
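
A small point on these kgsl_sharedmem.c changes: hoisting
order = get_order(PAGE_SIZE) out of the loops is a cleanup, not a
behaviour change. get_order(PAGE_SIZE) is 0 by definition, so each
__free_pages() call still frees a single page and each
mod_node_page_state() call adjusts the counter by exactly one page
(1 << 0).
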
@@ -1087,6 +1102,7 @@ static int kgsl_system_alloc_pages(u64 size, struct page ***pages,
 	struct scatterlist sg;
 	struct page **local;
 	int i, npages = size >> PAGE_SHIFT;
+	int order = get_order(PAGE_SIZE);
 
 	local = kvcalloc(npages, sizeof(*pages), GFP_KERNEL);
 	if (!local)
@@ -1098,8 +1114,13 @@
 		local[i] = alloc_pages(gfp, get_order(PAGE_SIZE));
 		if (!local[i]) {
-			for (i = i - 1; i >= 0; i--)
-				__free_pages(local[i], get_order(PAGE_SIZE));
+			for (i = i - 1; i >= 0; i--) {
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+				mod_node_page_state(page_pgdat(local[i]),
+					NR_UNRECLAIMABLE_PAGES, -(1 << order));
+#endif
+				__free_pages(local[i], order);
+			}
 			kvfree(local);
 			return -ENOMEM;
 		}
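
This unwind stays balanced because the matching increment (next hunk)
runs at the end of each successful loop iteration, after
dma_sync_sg_for_device(). Pages 0..i-1 have therefore been charged while
the page that just failed to allocate has not, so decrementing for
exactly those pages, before __free_pages() releases them, keeps the
counter consistent.
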
@@ -1110,6 +1131,10 @@
 		sg_dma_address(&sg) = page_to_phys(local[i]);
 
 		dma_sync_sg_for_device(dev, &sg, 1, DMA_BIDIRECTIONAL);
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+		mod_node_page_state(page_pgdat(local[i]), NR_UNRECLAIMABLE_PAGES,
+					(1 << order));
+#endif
 	}
 
 	*pages = local;
@@ -1256,9 +1281,14 @@ static int kgsl_alloc_contiguous(struct kgsl_device *device,
 	memdesc->ops = &kgsl_contiguous_ops;
 
 	ret = _kgsl_alloc_contiguous(&device->pdev->dev, memdesc, size, 0);
 
-	if (!ret)
+	if (!ret) {
 		KGSL_STATS_ADD(size, &kgsl_driver.stats.coherent,
 			&kgsl_driver.stats.coherent_max);
+#ifdef CONFIG_MM_STAT_UNRECLAIMABLE_PAGES
+		mod_node_page_state(page_pgdat(phys_to_page(memdesc->physaddr)),
+				NR_UNRECLAIMABLE_PAGES, (size >> PAGE_SHIFT));
+#endif
+	}
 
 	return ret;
 }
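
Taken together, every charge has a matching release: the pool handout
path pairs with kgsl_pool_free_page(), kgsl_system_alloc_pages() pairs
with kgsl_free_system_pages(), kgsl_free_secure_system_pages() and its
own error unwind, and kgsl_alloc_contiguous() pairs with
kgsl_contiguous_free(); on the per-process side,
kgsl_process_add_stats() pairs with kgsl_process_sub_stats(). Any new
allocation path added later has to preserve the same symmetry or the
counters will drift.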