mbcache: add functions to delete entry if unused

[ Upstream commit 3dc96bba65f53daa217f0a8f43edad145286a8f5 ]

Add function mb_cache_entry_delete_or_get() to delete mbcache entry if
it is unused and also add a function to wait for entry to become unused
- mb_cache_entry_wait_unused(). We do not share code between the two
deleting functions as one of them will go away soon.

CC: stable@vger.kernel.org
Fixes: 82939d7999 ("ext4: convert to mbcache2")
Signed-off-by: Jan Kara <jack@suse.cz>
Link: https://lore.kernel.org/r/20220712105436.32204-2-jack@suse.cz
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Stable-dep-of: a44e84a9b776 ("ext4: fix deadlock due to mbcache entry corruption")
Signed-off-by: Sasha Levin <sashal@kernel.org>
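
Below is a minimal caller-side sketch of how the two new helpers are intended to be combined, added here for illustration only; it is not part of the patch. The wrapper example_retire_entry() and its arguments are hypothetical, while the mb_cache_entry_*() calls are either introduced by this commit or already provided by mbcache:

#include <linux/mbcache.h>

/*
 * Hypothetical helper (not in this patch): stop using a cache entry that
 * other CPUs may still hold. mb_cache_entry_delete_or_get() either removes
 * an unused entry (returning NULL) or hands back a referenced entry that
 * still has users; in the latter case we sleep until we are the last user
 * and then drop the reference we were given.
 */
static void example_retire_entry(struct mb_cache *cache, u32 key, u64 value)
{
	struct mb_cache_entry *entry;

	entry = mb_cache_entry_delete_or_get(cache, key, value);
	if (!entry)
		return;	/* no matching entry, or it was unused and is now gone */

	mb_cache_entry_wait_unused(entry);	/* wait until we are the last user */
	mb_cache_entry_put(cache, entry);	/* drop the reference handed to us */
}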
parent 81b0bb1eb2
commit 951ea4d3df

diff --git a/fs/mbcache.c b/fs/mbcache.c
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -11,7 +11,7 @@
 /*
  * Mbcache is a simple key-value store. Keys need not be unique, however
  * key-value pairs are expected to be unique (we use this fact in
- * mb_cache_entry_delete()).
+ * mb_cache_entry_delete_or_get()).
  *
  * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
  * Ext4 also uses it for deduplication of xattr values stored in inodes.
@@ -125,6 +125,19 @@ void __mb_cache_entry_free(struct mb_cache_entry *entry)
 }
 EXPORT_SYMBOL(__mb_cache_entry_free);
 
+/*
+ * mb_cache_entry_wait_unused - wait to be the last user of the entry
+ *
+ * @entry - entry to work on
+ *
+ * Wait to be the last user of the entry.
+ */
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry)
+{
+	wait_var_event(&entry->e_refcnt, atomic_read(&entry->e_refcnt) <= 3);
+}
+EXPORT_SYMBOL(mb_cache_entry_wait_unused);
+
 static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
 					   struct mb_cache_entry *entry,
 					   u32 key)
@@ -217,7 +230,7 @@ out:
 }
 EXPORT_SYMBOL(mb_cache_entry_get);
 
-/* mb_cache_entry_delete - remove a cache entry
+/* mb_cache_entry_delete - try to remove a cache entry
  * @cache - cache we work with
  * @key - key
  * @value - value
@@ -254,6 +267,55 @@ void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
 }
 EXPORT_SYMBOL(mb_cache_entry_delete);
 
+/* mb_cache_entry_delete_or_get - remove a cache entry if it has no users
+ * @cache - cache we work with
+ * @key - key
+ * @value - value
+ *
+ * Remove entry from cache @cache with key @key and value @value. The removal
+ * happens only if the entry is unused. The function returns NULL in case the
+ * entry was successfully removed or there's no entry in cache. Otherwise the
+ * function grabs reference of the entry that we failed to delete because it
+ * still has users and return it.
+ */
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+						    u32 key, u64 value)
+{
+	struct hlist_bl_node *node;
+	struct hlist_bl_head *head;
+	struct mb_cache_entry *entry;
+
+	head = mb_cache_entry_head(cache, key);
+	hlist_bl_lock(head);
+	hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
+		if (entry->e_key == key && entry->e_value == value) {
+			if (atomic_read(&entry->e_refcnt) > 2) {
+				atomic_inc(&entry->e_refcnt);
+				hlist_bl_unlock(head);
+				return entry;
+			}
+			/* We keep hash list reference to keep entry alive */
+			hlist_bl_del_init(&entry->e_hash_list);
+			hlist_bl_unlock(head);
+			spin_lock(&cache->c_list_lock);
+			if (!list_empty(&entry->e_list)) {
+				list_del_init(&entry->e_list);
+				if (!WARN_ONCE(cache->c_entry_count == 0,
+		"mbcache: attempt to decrement c_entry_count past zero"))
+					cache->c_entry_count--;
+				atomic_dec(&entry->e_refcnt);
+			}
+			spin_unlock(&cache->c_list_lock);
+			mb_cache_entry_put(cache, entry);
+			return NULL;
+		}
+	}
+	hlist_bl_unlock(head);
+
+	return NULL;
+}
+EXPORT_SYMBOL(mb_cache_entry_delete_or_get);
+
 /* mb_cache_entry_touch - cache entry got used
  * @cache - cache the entry belongs to
  * @entry - entry that got used
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -30,15 +30,23 @@ void mb_cache_destroy(struct mb_cache *cache);
 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
 			  u64 value, bool reusable);
 void __mb_cache_entry_free(struct mb_cache_entry *entry);
+void mb_cache_entry_wait_unused(struct mb_cache_entry *entry);
 static inline int mb_cache_entry_put(struct mb_cache *cache,
 				     struct mb_cache_entry *entry)
 {
-	if (!atomic_dec_and_test(&entry->e_refcnt))
+	unsigned int cnt = atomic_dec_return(&entry->e_refcnt);
+
+	if (cnt > 0) {
+		if (cnt <= 3)
+			wake_up_var(&entry->e_refcnt);
 		return 0;
+	}
 	__mb_cache_entry_free(entry);
 	return 1;
 }
 
+struct mb_cache_entry *mb_cache_entry_delete_or_get(struct mb_cache *cache,
+						    u32 key, u64 value);
 void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
 					  u64 value);
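
A note on the thresholds above (this accounting is inferred from the code, not stated in the patch): an entry with no users appears to be pinned by two long-lived references, one for the hash list and one for the LRU list, so a count above 2 in mb_cache_entry_delete_or_get() means somebody else still holds the entry. Once that function hands the caller its own reference, the count falls back to 3 exactly when the last other user drops out, which is the condition mb_cache_entry_wait_unused() waits for, and mb_cache_entry_put() wakes waiters whenever the count drops to 3 or below. The underlying wait_var_event()/wake_up_var() handshake looks roughly like this standalone sketch (not part of the patch):

#include <linux/atomic.h>
#include <linux/wait_bit.h>	/* wait_var_event() / wake_up_var() */

static atomic_t example_refcnt = ATOMIC_INIT(5);

/* Waiter side: sleep until the condition on &example_refcnt is observed. */
static void example_waiter(void)
{
	wait_var_event(&example_refcnt, atomic_read(&example_refcnt) <= 3);
}

/*
 * Release side: drop one reference and, if the waiter's condition may now
 * hold, wake everyone sleeping on &example_refcnt. This mirrors the
 * wake-up added to mb_cache_entry_put() by this patch.
 */
static void example_release(void)
{
	if (atomic_dec_return(&example_refcnt) <= 3)
		wake_up_var(&example_refcnt);
}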