f14f75b811
This patch contains the ia64 uncached page allocator and the generic allocator (genalloc). The uncached allocator was formerly part of the SN2 mspec driver, but since it has several other users it has been split off from the driver. The generic allocator can be used by device drivers to manage special memory etc.; it is based on the allocator from the sym53c8xx_2 driver.

Various users on ia64 need uncached memory. The SGI SN architecture requires it for inter-partition communication between partitions within a large NUMA cluster; the specific user of this is the XPC code. Another application is large MPI-style applications, which use it for synchronization. On SN this can be done using special 'fetchop' operations, but it also benefits non-SN hardware, which may use regular uncached memory for this purpose. The performance difference between doing this through uncached rather than cached memory is substantial. This is handled by the mspec driver, which I will push out in a separate patch.

Rather than creating an allocator specific to uncached memory, I came up with genalloc, a general-purpose allocator that can be used by device drivers and other subsystems as they please, for instance to handle on-board device memory. It was derived from the sym53c8xx_2 driver's allocator, which is also an example of a potential user (I am refraining from modifying sym2 right now as it seems to have been under fairly heavy development recently).

On ia64, memory attributes apply at granule granularity, i.e. it isn't safe to access memory as uncached within a granule that currently has memory being accessed in cached mode. The regular system therefore doesn't utilize memory in the lower granules, which is mixed in with device PAL code etc. The uncached driver walks the EFI memmap, pulls out these spill uncached pages and sticks them into the uncached pool. Only after these chunks have been used up will it start converting regular cached memory into uncached memory; hence the EFI-related code additions.

Signed-off-by: Jes Sorensen <jes@wildopensource.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
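
As a rough usage sketch (not part of this patch): a driver managing a window of on-board device memory could drive genalloc through the same calls used below in uncached.c -- gen_pool_create() with a "get new chunk" callback, gen_pool_alloc() and gen_pool_free(). The names my_chunk_cb, my_driver_init and MY_MEM_BASE/MY_MEM_SIZE/MY_CHUNK_SHIFT are invented for illustration, and the gen_pool_create() arguments simply mirror the call in uncached_init() further down.

/* Hypothetical genalloc user -- a sketch only, not part of this patch. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/genalloc.h>

#define MY_MEM_BASE	0xd0000000UL	/* assumed on-board memory window (made up) */
#define MY_MEM_SIZE	(256 * 1024)	/* 256 KiB of device memory */
#define MY_CHUNK_SHIFT	16		/* feed the pool 64 KiB at a time */

static struct gen_pool *my_pool;

/* Called by genalloc when the pool needs more backing memory; 0 means "no more". */
static unsigned long my_chunk_cb(struct gen_pool *poolp)
{
	static unsigned long next = MY_MEM_BASE;
	unsigned long chunk;

	if (next >= MY_MEM_BASE + MY_MEM_SIZE)
		return 0;
	chunk = next;
	next += 1UL << MY_CHUNK_SHIFT;
	return chunk;
}

static int __init my_driver_init(void)
{
	unsigned long buf;

	my_pool = gen_pool_create(0, MY_CHUNK_SHIFT, &my_chunk_cb, 0);
	if (!my_pool)
		return -ENOMEM;

	buf = gen_pool_alloc(my_pool, 4096);	/* carve 4 KiB out of the pool */
	if (buf)
		gen_pool_free(my_pool, buf, 4096);
	return 0;
}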
247 lines
5.8 KiB
C
/*
 * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * A simple uncached page allocator using the generic allocator. This
 * allocator first utilizes the spare (spill) pages found in the EFI
 * memmap and will then start converting cached pages to uncached ones,
 * one granule at a time. Node awareness is implemented by having a
 * pool of pages per node.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/efi.h>
#include <linux/genalloc.h>
#include <asm/page.h>
#include <asm/pal.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>

#define DEBUG	0

#if DEBUG
#define dprintk		printk
#else
#define dprintk(x...)	do { } while (0)
#endif

void __init efi_memmap_walk_uc(efi_freemem_callback_t callback);

#define MAX_UNCACHED_GRANULES	5
static int allocated_granules;

struct gen_pool *uncached_pool[MAX_NUMNODES];


/* IPI handler: call PAL_PREFETCH_VISIBILITY on this cpu while a granule's memory attribute is being changed. */
static void uncached_ipi_visibility(void *data)
{
	int status;

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
	if ((status != PAL_VISIBILITY_OK) &&
	    (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
		printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
		       "CPU %i\n", status, get_cpu());
}


/* IPI handler: call PAL_MC_DRAIN on this cpu. */
static void uncached_ipi_mc_drain(void *data)
{
	int status;
	status = ia64_pal_mc_drain();
	if (status)
		printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
		       "CPU %i\n", status, get_cpu());
}


/*
 * Called by the generic allocator when a pool runs out of pages: allocate
 * a fresh (cached) granule, convert it to uncached and return its address
 * in the uncached identity mapping, or 0 on failure.
 */
static unsigned long
uncached_get_new_chunk(struct gen_pool *poolp)
{
	struct page *page;
	void *tmp;
	int status, i;
	unsigned long addr, node;

	if (allocated_granules >= MAX_UNCACHED_GRANULES)
		return 0;

	node = poolp->private;
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
				IA64_GRANULE_SHIFT-PAGE_SHIFT);

	dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
		page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);

	/*
	 * Do magic if no mem on local node! XXX
	 */
	if (!page)
		return 0;
	tmp = page_address(page);

	/*
	 * There's a small race here where it's possible for someone to
	 * access the page through /dev/mem halfway through the conversion
	 * to uncached - not sure it's really worth bothering about
	 */
	for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
		SetPageUncached(&page[i]);

	flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);

	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);

	dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
		status, get_cpu());

	if (!status) {
		status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
		if (status)
			printk(KERN_WARNING "smp_call_function failed for "
			       "uncached_ipi_visibility! (%i)\n", status);
	}

	if (ia64_platform_is("sn2"))
		sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
	else
		flush_icache_range((unsigned long)tmp,
				   (unsigned long)tmp+IA64_GRANULE_SIZE);

	ia64_pal_mc_drain();
	status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
	if (status)
		printk(KERN_WARNING "smp_call_function failed for "
		       "uncached_ipi_mc_drain! (%i)\n", status);

	addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;

	allocated_granules++;
	return addr;
}


/*
 * uncached_alloc_page
 *
 * Allocate 1 uncached page. Allocates on the requested node. If no
 * uncached pages are available on the requested node, round-robin starting
 * with the higher nodes.
 */
unsigned long
uncached_alloc_page(int nid)
{
	unsigned long maddr;

	maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);

	dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
		maddr, nid);

	/*
	 * If no memory is available on our local node, try the
	 * remaining nodes in the system.
	 */
	if (!maddr) {
		int i;

		for (i = MAX_NUMNODES - 1; i >= 0; i--) {
			if (i == nid || !node_online(i))
				continue;
			maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
			dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
				"returns %lx on node %i\n", maddr, i);
			if (maddr)
				break;
		}
	}

	return maddr;
}
EXPORT_SYMBOL(uncached_alloc_page);


/*
 * uncached_free_page
 *
 * Free a single uncached page.
 */
void
uncached_free_page(unsigned long maddr)
{
	int node;

	node = nasid_to_cnodeid(NASID_GET(maddr));

	dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);

	if ((maddr & (0xfUL << 60)) != __IA64_UNCACHED_OFFSET)
		panic("uncached_free_page invalid address %lx\n", maddr);

	gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);


/*
 * uncached_build_memmap
 *
 * Called at boot time to build a map of pages that can be used for
 * memory special operations.
 */
static int __init
uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
{
	long length;
	unsigned long vstart, vend;
	int node;

	length = end - start;
	vstart = start + __IA64_UNCACHED_OFFSET;
	vend = end + __IA64_UNCACHED_OFFSET;

	dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);

	memset((char *)vstart, 0, length);

	node = nasid_to_cnodeid(NASID_GET(start));

	for (; vstart < vend ; vstart += PAGE_SIZE) {
		dprintk(KERN_INFO "sticking %lx into the pool!\n", vstart);
		gen_pool_free(uncached_pool[node], vstart, PAGE_SIZE);
	}

	return 0;
}


static int __init
uncached_init(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!node_online(i))
			continue;
		uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
						   &uncached_get_new_chunk, i);
	}

	efi_memmap_walk_uc(uncached_build_memmap);

	return 0;
}

__initcall(uncached_init);
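

As a final illustration, here is a minimal, hypothetical sketch of how a client such as the XPC code or the mspec driver mentioned in the commit message might use the two exported entry points. example_uncached_user() and the hard-coded node id are invented for this sketch and are not part of the patch.

/* Hypothetical caller of the uncached page allocator -- illustration only. */
#include <linux/errno.h>

extern unsigned long uncached_alloc_page(int nid);
extern void uncached_free_page(unsigned long maddr);

static int example_uncached_user(void)
{
	unsigned long uc_addr;

	/* Try node 0 first; the allocator itself falls back to other nodes. */
	uc_addr = uncached_alloc_page(0);
	if (!uc_addr)
		return -ENOMEM;

	/*
	 * uc_addr is a kernel virtual address in the uncached identity
	 * mapping (__IA64_UNCACHED_OFFSET), suitable e.g. for cross-partition
	 * flags or synchronization words.
	 */
	*(volatile unsigned long *)uc_addr = 0;

	uncached_free_page(uc_addr);	/* hand the page back to its node's pool */
	return 0;
}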