cipso: Fix data-races around sysctl.

[ Upstream commit dd44f04b9214adb68ef5684ae87a81ba03632250 ]

While cipso sysctl variables are being read, they can be changed concurrently.
So, we need to add READ_ONCE() to avoid data races.

Fixes: 446fda4f26 ("[NetLabel]: CIPSOv4 engine")
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Acked-by: Paul Moore <paul@paul-moore.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
Kuniyuki Iwashima 2022-07-06 16:40:01 -07:00 committed by Greg Kroah-Hartman
parent 861f1852af
commit 0e41a0f73c
2 changed files with 8 additions and 6 deletions

View File

@ -876,7 +876,7 @@ cipso_cache_enable - BOOLEAN
cipso_cache_bucket_size - INTEGER cipso_cache_bucket_size - INTEGER
The CIPSO label cache consists of a fixed size hash table with each The CIPSO label cache consists of a fixed size hash table with each
hash bucket containing a number of cache entries. This variable limits hash bucket containing a number of cache entries. This variable limits
the number of entries in each hash bucket; the larger the value the the number of entries in each hash bucket; the larger the value is, the
more CIPSO label mappings that can be cached. When the number of more CIPSO label mappings that can be cached. When the number of
entries in a given hash bucket reaches this limit adding new entries entries in a given hash bucket reaches this limit adding new entries
causes the oldest entry in the bucket to be removed to make room. causes the oldest entry in the bucket to be removed to make room.

View File

@ -240,7 +240,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
struct cipso_v4_map_cache_entry *prev_entry = NULL; struct cipso_v4_map_cache_entry *prev_entry = NULL;
u32 hash; u32 hash;
if (!cipso_v4_cache_enabled) if (!READ_ONCE(cipso_v4_cache_enabled))
return -ENOENT; return -ENOENT;
hash = cipso_v4_map_cache_hash(key, key_len); hash = cipso_v4_map_cache_hash(key, key_len);
@ -297,13 +297,14 @@ static int cipso_v4_cache_check(const unsigned char *key,
int cipso_v4_cache_add(const unsigned char *cipso_ptr, int cipso_v4_cache_add(const unsigned char *cipso_ptr,
const struct netlbl_lsm_secattr *secattr) const struct netlbl_lsm_secattr *secattr)
{ {
int bkt_size = READ_ONCE(cipso_v4_cache_bucketsize);
int ret_val = -EPERM; int ret_val = -EPERM;
u32 bkt; u32 bkt;
struct cipso_v4_map_cache_entry *entry = NULL; struct cipso_v4_map_cache_entry *entry = NULL;
struct cipso_v4_map_cache_entry *old_entry = NULL; struct cipso_v4_map_cache_entry *old_entry = NULL;
u32 cipso_ptr_len; u32 cipso_ptr_len;
if (!cipso_v4_cache_enabled || cipso_v4_cache_bucketsize <= 0) if (!READ_ONCE(cipso_v4_cache_enabled) || bkt_size <= 0)
return 0; return 0;
cipso_ptr_len = cipso_ptr[1]; cipso_ptr_len = cipso_ptr[1];
@ -323,7 +324,7 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1); bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock); spin_lock_bh(&cipso_v4_cache[bkt].lock);
if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) { if (cipso_v4_cache[bkt].size < bkt_size) {
list_add(&entry->list, &cipso_v4_cache[bkt].list); list_add(&entry->list, &cipso_v4_cache[bkt].list);
cipso_v4_cache[bkt].size += 1; cipso_v4_cache[bkt].size += 1;
} else { } else {
@ -1200,7 +1201,8 @@ static int cipso_v4_gentag_rbm(const struct cipso_v4_doi *doi_def,
/* This will send packets using the "optimized" format when /* This will send packets using the "optimized" format when
* possible as specified in section 3.4.2.6 of the * possible as specified in section 3.4.2.6 of the
* CIPSO draft. */ * CIPSO draft. */
if (cipso_v4_rbm_optfmt && ret_val > 0 && ret_val <= 10) if (READ_ONCE(cipso_v4_rbm_optfmt) && ret_val > 0 &&
ret_val <= 10)
tag_len = 14; tag_len = 14;
else else
tag_len = 4 + ret_val; tag_len = 4 + ret_val;
@ -1603,7 +1605,7 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
* all the CIPSO validations here but it doesn't * all the CIPSO validations here but it doesn't
* really specify _exactly_ what we need to validate * really specify _exactly_ what we need to validate
* ... so, just make it a sysctl tunable. */ * ... so, just make it a sysctl tunable. */
if (cipso_v4_rbm_strictvalid) { if (READ_ONCE(cipso_v4_rbm_strictvalid)) {
if (cipso_v4_map_lvl_valid(doi_def, if (cipso_v4_map_lvl_valid(doi_def,
tag[3]) < 0) { tag[3]) < 0) {
err_offset = opt_iter + 3; err_offset = opt_iter + 3;