android_kernel_xiaomi_sm8350/fs/afs/file.c


/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include "internal.h"
static int afs_file_readpage(struct file *file, struct page *page);
static void afs_file_invalidatepage(struct page *page, unsigned long offset);
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.mmap		= generic_file_readonly_mmap,
	.sendfile	= generic_file_sendfile,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_inode_getattr,
	.permission	= afs_permission,
};

const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_file_readpage,
	.set_page_dirty	= __set_page_dirty_nobuffers,
	.releasepage	= afs_file_releasepage,
	.invalidatepage	= afs_file_invalidatepage,
};

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct key *key;
	int ret;

	_enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		_leave(" = %ld [key]", PTR_ERR(key));
		return PTR_ERR(key);
	}
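
	/* revalidate the vnode before use; afs_validate() is assumed to
	 * recheck the server's callback promise and refetch the vnode's
	 * status if it has lapsed */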
	ret = afs_validate(vnode, key);
	if (ret < 0) {
		/* drop the key reference: afs_release() will never run for
		 * this file, so nobody else would put it */
		key_put(key);
		_leave(" = %d [val]", ret);
		return ret;
	}
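
	/* stash the key in the file struct; afs_release() will drop this
	 * reference with key_put() */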
	file->private_data = key;
	_leave(" = 0");
	return 0;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);

	_enter("{%x:%x},", vnode->fid.vid, vnode->fid.vnode);

	key_put(file->private_data);
	_leave(" = 0");
	return 0;
}

/*
 * deal with notification that a page was read from the cache
 */
#ifdef AFS_CACHING_SUPPORT
static void afs_file_readpage_read_complete(void *cookie_data,
					    struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%p,%d", cookie_data, page, data, error);

	if (error)
		SetPageError(page);
	else
		SetPageUptodate(page);
	unlock_page(page);
}
#endif

/*
 * deal with notification that a page was written to the cache
 */
#ifdef AFS_CACHING_SUPPORT
static void afs_file_readpage_write_complete(void *cookie_data,
					     struct page *page,
					     void *data,
					     int error)
{
	_enter("%p,%p,%p,%d", cookie_data, page, data, error);

	unlock_page(page);
}
#endif

/*
 * AFS read page from file (or symlink)
 */
static int afs_file_readpage(struct file *file, struct page *page)
{
	struct afs_vnode *vnode;
	struct inode *inode;
	struct key *key;
	size_t len;
	off_t offset;
	int ret;

	inode = page->mapping->host;

	ASSERT(file != NULL);
	key = file->private_data;
	ASSERT(key != NULL);

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	vnode = AFS_FS_I(inode);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;
#ifdef AFS_CACHING_SUPPORT
	/* is it cached? */
	ret = cachefs_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
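
	/* assumed semantics of the old cachefs API, per the per-case
	 * comments below: 1 means the read was submitted and a wb-journal
	 * entry matched, 0 means a read was submitted from the cache, and
	 * -ENOBUFS/-ENODATA mean the page isn't in the cache and must be
	 * fetched from the server */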
	switch (ret) {
		/* read BIO submitted and wb-journal entry found */
	case 1:
		BUG(); // TODO - handle wb-journal match

		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* no page available in cache */
	case -ENOBUFS:
	case -ENODATA:
	default:
		offset = page->index << PAGE_CACHE_SHIFT;
		len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
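		/* len is clamped above so that we never ask the server for
		 * data beyond EOF */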

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_vnode_fetch_data(vnode, key, offset, len, page);
		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}
#ifdef AFS_CACHING_SUPPORT
			cachefs_uncache_page(vnode->cache, page);
#endif
			goto error;
		}
		SetPageUptodate(page);

#ifdef AFS_CACHING_SUPPORT
		if (cachefs_write_page(vnode->cache,
				       page,
				       afs_file_readpage_write_complete,
				       NULL,
				       GFP_KERNEL) != 0
		    ) {
			cachefs_uncache_page(vnode->cache, page);
			unlock_page(page);
		}
#else
		unlock_page(page);
#endif
	}
_leave(" = 0");
return 0;
error:
SetPageError(page);
unlock_page(page);
_leave(" = %d", ret);
return ret;
}

/*
 * get a page cookie for the specified page
 */
#ifdef AFS_CACHING_SUPPORT
int afs_cache_get_page_cookie(struct page *page,
			      struct cachefs_page **_page_cookie)
{
	int ret;

	_enter("");
	ret = cachefs_page_get_private(page, _page_cookie, GFP_NOIO);

	_leave(" = %d", ret);
	return ret;
}
#endif

/*
 * invalidate part or all of a page
 */
static void afs_file_invalidatepage(struct page *page, unsigned long offset)
{
	int ret = 1;

	_enter("{%lu},%lu", page->index, offset);

	BUG_ON(!PageLocked(page));

	if (PagePrivate(page)) {
#ifdef AFS_CACHING_SUPPORT
		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
		cachefs_uncache_page(vnode->cache, page);
#endif

		/* We release buffers only if the entire page is being
		 * invalidated.
		 * The get_block cached value has been unconditionally
		 * invalidated, so real IO is not possible anymore.
		 */
		if (offset == 0) {
			BUG_ON(!PageLocked(page));

			ret = 0;
			if (!PageWriteback(page))
				ret = page->mapping->a_ops->releasepage(page,
									0);
			/* possibly should BUG_ON(!ret); - neilb */
		}
	}

	_leave(" = %d", ret);
}

/*
 * release a page and cleanup its private data
 */
static int afs_file_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct cachefs_page *pageio;

	_enter("{%lu},%x", page->index, gfp_flags);

	if (PagePrivate(page)) {
#ifdef AFS_CACHING_SUPPORT
		struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
		cachefs_uncache_page(vnode->cache, page);
#endif
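
		/* the private data is assumed to be the cachefs page cookie
		 * attached by cachefs_page_get_private(); detach and free it
		 * now that the page is going away */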
		pageio = (struct cachefs_page *) page_private(page);
		set_page_private(page, 0);
		ClearPagePrivate(page);
		kfree(pageio);
	}

	_leave(" = 0");
	return 0;
}