android_kernel_xiaomi_sm8350/include/linux/slab.h
Manfred Spraul 97e2bde47f [PATCH] add kmalloc_node, inline cleanup
The patch makes the following function calls available to allocate memory
on a specific node without changing the basic operation of the slab
allocator:

 kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int flags, int node);
 kmalloc_node(size_t size, unsigned int flags, int node);

in a similar way to the existing node-blind functions:

 kmem_cache_alloc(kmem_cache_t *cachep, unsigned int flags);
 kmalloc(size, flags);

kmem_cache_alloc_node was changed to pass flags and the node information
through the existing layers of the slab allocator (which led to some minor
rearrangements).  The functions at the lowest layer (kmem_getpages,
cache_grow) are already node aware.  Also __alloc_percpu can call
kmalloc_node now.
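
For illustration, a minimal caller sketch (hypothetical names, not part of
this patch) that places a buffer on the node that will use it:

	#include <linux/slab.h>

	#define MY_BUF_SIZE 256	/* illustrative size only */

	static void *my_alloc_on_node(int node)
	{
		/* Allocate from the slab pools of the given NUMA node;
		 * falls back to the node-blind path on !CONFIG_NUMA. */
		void *buf = kmalloc_node(MY_BUF_SIZE, GFP_KERNEL, node);

		if (!buf)
			return NULL;	/* handle failure as usual */
		return buf;
	}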

Performance measurements (using the pageset localization patch) yield:

w/o patches:
Tasks    jobs/min  jti  jobs/min/task      real       cpu
    1      484.27  100       484.2736     12.02      1.97   Wed Mar 30 20:50:43 2005
  100    25170.83   91       251.7083     23.12    150.10   Wed Mar 30 20:51:06 2005
  200    34601.66   84       173.0083     33.64    294.14   Wed Mar 30 20:51:40 2005
  300    37154.47   86       123.8482     46.99    436.56   Wed Mar 30 20:52:28 2005
  400    39839.82   80        99.5995     58.43    580.46   Wed Mar 30 20:53:27 2005
  500    40036.32   79        80.0726     72.68    728.60   Wed Mar 30 20:54:40 2005
  600    44074.21   79        73.4570     79.23    872.10   Wed Mar 30 20:55:59 2005
  700    44016.60   78        62.8809     92.56   1015.84   Wed Mar 30 20:57:32 2005
  800    40411.05   80        50.5138    115.22   1161.13   Wed Mar 30 20:59:28 2005
  900    42298.56   79        46.9984    123.83   1303.42   Wed Mar 30 21:01:33 2005
 1000    40955.05   80        40.9551    142.11   1441.92   Wed Mar 30 21:03:55 2005

with pageset localization and slab API patches:
Tasks    jobs/min  jti  jobs/min/task      real       cpu
    1      484.19  100       484.1930     12.02      1.98   Wed Mar 30 21:10:18 2005
  100    27428.25   92       274.2825     21.22    149.79   Wed Mar 30 21:10:40 2005
  200    37228.94   86       186.1447     31.27    293.49   Wed Mar 30 21:11:12 2005
  300    41725.42   85       139.0847     41.84    434.10   Wed Mar 30 21:11:54 2005
  400    43032.22   82       107.5805     54.10    582.06   Wed Mar 30 21:12:48 2005
  500    42211.23   83        84.4225     68.94    722.61   Wed Mar 30 21:13:58 2005
  600    40084.49   82        66.8075     87.12    873.11   Wed Mar 30 21:15:25 2005
  700    44169.30   79        63.0990     92.24   1008.77   Wed Mar 30 21:16:58 2005
  800    43097.94   79        53.8724    108.03   1155.88   Wed Mar 30 21:18:47 2005
  900    41846.75   79        46.4964    125.17   1303.38   Wed Mar 30 21:20:52 2005
 1000    40247.85   79        40.2478    144.60   1442.21   Wed Mar 30 21:23:17 2005

Signed-off-by: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-05-01 08:58:38 -07:00


/*
 * linux/mm/slab.h
 * Written by Mark Hemment, 1996.
 * (markhe@nextd.demon.co.uk)
 */
#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H
#if defined(__KERNEL__)
typedef struct kmem_cache_s kmem_cache_t;
#include <linux/config.h> /* kmalloc_sizes.h needs CONFIG_ options */
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/types.h>
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
/* flags for kmem_cache_alloc() */
#define SLAB_NOFS GFP_NOFS
#define SLAB_NOIO GFP_NOIO
#define SLAB_ATOMIC GFP_ATOMIC
#define SLAB_USER GFP_USER
#define SLAB_KERNEL GFP_KERNEL
#define SLAB_DMA GFP_DMA
#define SLAB_LEVEL_MASK GFP_LEVEL_MASK
#define SLAB_NO_GROW __GFP_NO_GROW /* don't grow a cache */
/* flags to pass to kmem_cache_create().
 * The first 3 are only valid when the allocator has been built with
 * SLAB_DEBUG_SUPPORT.
 */
#define SLAB_DEBUG_FREE 0x00000100UL /* Perform (expensive) checks on free */
#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */
#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
#define SLAB_POISON 0x00000800UL /* Poison objects */
#define SLAB_NO_REAP 0x00001000UL /* never reap from the cache */
#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on h/w cache lines */
#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
#define SLAB_STORE_USER 0x00010000UL /* store the last owner for bug hunting */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* track pages allocated to indicate
                                             what is reclaimable later */
#define SLAB_PANIC 0x00040000UL /* panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU 0x00080000UL /* defer freeing pages to RCU */
/* flags passed to a constructor func */
#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then destructor */
#define SLAB_CTOR_ATOMIC 0x002UL /* tell constructor it can't sleep */
#define SLAB_CTOR_VERIFY 0x004UL /* tell constructor it's a verify call */
/* prototypes */
extern void __init kmem_cache_init(void);
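/* args: name, object size, alignment, flags, constructor, destructor */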
extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned long,
                                       void (*)(void *, kmem_cache_t *, unsigned long),
                                       void (*)(void *, kmem_cache_t *, unsigned long));
extern int kmem_cache_destroy(kmem_cache_t *);
extern int kmem_cache_shrink(kmem_cache_t *);
extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast);
extern void kmem_cache_free(kmem_cache_t *, void *);
extern unsigned int kmem_cache_size(kmem_cache_t *);
extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags);
/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	kmem_cache_t *cs_cachep;
	kmem_cache_t *cs_dmacachep;
};
extern struct cache_sizes malloc_sizes[];
extern void *__kmalloc(size_t, unsigned int __nocast);
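/*
 * kmalloc() resolves compile-time-constant sizes to a general-cache index
 * at compile time: each CACHE(x) expansion from kmalloc_sizes.h tests one
 * cache size, and the optimizer folds the if-chain into a direct
 * kmem_cache_alloc() on the matching cache.  Non-constant sizes fall back
 * to __kmalloc().
 */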
static inline void *kmalloc(size_t size, unsigned int __nocast flags)
{
	if (__builtin_constant_p(size)) {
		int i = 0;
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include "kmalloc_sizes.h"
#undef CACHE
		{
			extern void __you_cannot_kmalloc_that_much(void);
			__you_cannot_kmalloc_that_much();
		}
found:
		return kmem_cache_alloc((flags & GFP_DMA) ?
			malloc_sizes[i].cs_dmacachep :
			malloc_sizes[i].cs_cachep, flags);
	}
	return __kmalloc(size, flags);
}
extern void *kcalloc(size_t, size_t, unsigned int __nocast);
extern void kfree(const void *);
extern unsigned int ksize(const void *);
#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node);
extern void *kmalloc_node(size_t size, int flags, int node);
#else
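/*
 * Without CONFIG_NUMA there is a single memory node, so the node-aware
 * calls degrade to their node-blind counterparts and 'node' is ignored.
 */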
static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline void *kmalloc_node(size_t size, int flags, int node)
{
	return kmalloc(size, flags);
}
#endif
extern int FASTCALL(kmem_cache_reap(int));
extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr));
/* System wide caches */
extern kmem_cache_t *vm_area_cachep;
extern kmem_cache_t *names_cachep;
extern kmem_cache_t *files_cachep;
extern kmem_cache_t *filp_cachep;
extern kmem_cache_t *fs_cachep;
extern kmem_cache_t *signal_cachep;
extern kmem_cache_t *sighand_cachep;
extern kmem_cache_t *bio_cachep;
extern atomic_t slab_reclaim_pages;
#endif /* __KERNEL__ */
#endif /* _LINUX_SLAB_H */