/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@redhat.com>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
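
/*
 * Architectures can provide their own pgprot_modify(); this fallback
 * simply adopts the new protection bits wholesale.
 */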
#ifndef pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif
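
/*
 * Change the protection on every pte in [addr, end) under one pmd.
 * Runs under the pte lock with lazy MMU updates batched; write
 * migration entries are downgraded to read-only, since a proper
 * protection check is not practical here.
 */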
static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pte_t *pte, oldpte;
	spinlock_t *ptl;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;

			ptent = ptep_modify_prot_start(mm, addr, pte);
			ptent = pte_modify(ptent, newprot);

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent))
				ptent = pte_mkwrite(ptent);

			ptep_modify_prot_commit(mm, addr, pte, ptent);
#ifdef CONFIG_MIGRATION
		} else if (!pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
#endif
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
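
/*
 * Walk the pmd entries below one pud, applying the new protection to
 * each page table actually present in [addr, end).
 */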
static inline void change_pmd_range(struct mm_struct *mm, pud_t *pud,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pmd_t *pmd;
	unsigned long next;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none_or_clear_bad(pmd))
			continue;
		change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
	} while (pmd++, addr = next, addr != end);
}
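
/*
 * Walk the pud entries below one pgd, recursing into change_pmd_range()
 * for each populated entry in [addr, end).
 */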
static inline void change_pud_range(struct mm_struct *mm, pgd_t *pgd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	pud_t *pud;
	unsigned long next;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		change_pmd_range(mm, pud, addr, next, newprot, dirty_accountable);
	} while (pud++, addr = next, addr != end);
}
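
/*
 * Top level of the walk: flush the cache for the range, update every
 * mapped pte through the pgd/pud/pmd walkers, then flush the TLB so
 * no stale translation retains the old protection.
 */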
static void change_protection(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	unsigned long next;
	unsigned long start = addr;

	BUG_ON(addr >= end);
	pgd = pgd_offset(mm, addr);
	flush_cache_range(vma, addr, end);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		change_pud_range(mm, pgd, addr, next, newprot, dirty_accountable);
	} while (pgd++, addr = next, addr != end);
	flush_tlb_range(vma, start, end);
}
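
/*
 * Apply newflags to the vma covering [start, end): charge newly
 * writable private pages against the commit, merge or split vmas as
 * needed, then rewrite the page tables.  On success *pprev is the vma
 * that now covers the range.
 */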
int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again.
	 *
	 * FIXME? We haven't defined a VM_NORESERVE flag, so mprotecting
	 * a MAP_NORESERVE private mapping to writable will now reserve.
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_SHARED))) {
			charged = nrpages;
			if (security_vm_enough_memory(charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}
	}

	/*
	 * First try to merge with previous and/or next vma.
	 */
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*pprev = vma_merge(mm, *pprev, start, end, newflags,
			vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma));
	if (*pprev) {
		vma = *pprev;
		goto success;
	}

	*pprev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto fail;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto fail;
	}

success:
	/*
	 * vm_flags and vm_page_prot are protected by the mmap_sem
	 * held in write mode.
	 */
	vma->vm_flags = newflags;
	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
					  vm_get_page_prot(newflags));

	if (vma_wants_writenotify(vma)) {
		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
		dirty_accountable = 1;
	}

	if (is_vm_hugetlb_page(vma))
		hugetlb_change_protection(vma, start, end, vma->vm_page_prot);
	else
		change_protection(vma, start, end, vma->vm_page_prot, dirty_accountable);
	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
	return 0;

fail:
	vm_unacct_memory(charged);
	return error;
}
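
/*
 * The mprotect(2) entry point: validate the request, widen it for
 * PROT_GROWSDOWN/PROT_GROWSUP mappings, then apply mprotect_fixup()
 * to each vma in [start, start+len) while holding mmap_sem for write.
 */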
asmlinkage long
sys_mprotect(unsigned long start, size_t len, unsigned long prot)
{
	unsigned long vm_flags, nstart, end, tmp, reqprot;
	struct vm_area_struct *vma, *prev;
	int error = -EINVAL;
	const int grows = prot & (PROT_GROWSDOWN|PROT_GROWSUP);
	prot &= ~(PROT_GROWSDOWN|PROT_GROWSUP);
	if (grows == (PROT_GROWSDOWN|PROT_GROWSUP)) /* can't be both */
		return -EINVAL;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return 0;
	len = PAGE_ALIGN(len);
	end = start + len;
	if (end <= start)
		return -ENOMEM;
	if (!arch_validate_prot(prot))
		return -EINVAL;

	reqprot = prot;
	/*
	 * Does the application expect PROT_READ to imply PROT_EXEC:
	 */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	vm_flags = calc_vm_prot_bits(prot);

	down_write(&current->mm->mmap_sem);

	vma = find_vma_prev(current->mm, start, &prev);
	error = -ENOMEM;
	if (!vma)
		goto out;
	if (unlikely(grows & PROT_GROWSDOWN)) {
		if (vma->vm_start >= end)
			goto out;
		start = vma->vm_start;
		error = -EINVAL;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto out;
	}
	else {
		if (vma->vm_start > start)
			goto out;
		if (unlikely(grows & PROT_GROWSUP)) {
			end = vma->vm_end;
			error = -EINVAL;
			if (!(vma->vm_flags & VM_GROWSUP))
				goto out;
		}
	}
	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned long newflags;

		/* Here we know that vma->vm_start <= nstart < vma->vm_end. */

		newflags = vm_flags | (vma->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));

		/* newflags >> 4 shift VM_MAY% in place of VM_% */
		if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
			error = -EACCES;
			goto out;
		}

		error = security_file_mprotect(vma, reqprot, prot);
		if (error)
			goto out;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			goto out;
		nstart = tmp;

		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			goto out;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			goto out;
		}
	}
out:
	up_write(&current->mm->mmap_sem);
	return error;
}