net: add atomic_long_t to net_device_stats fields

[ Upstream commit 6c1c5097781f563b70a81683ea6fdac21637573b ]

Long-standing KCSAN issues are caused by data races around
some dev->stats changes.

Most performance critical paths already use per-cpu
variables, or per-queue ones.

It is reasonable (and more correct) to use atomic operations
for the slow paths.

This patch adds a union for each field of net_device_stats,
so that we can convert paths that are not yet protected
by a spinlock or a mutex; see the sketch after this paragraph.
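
As a rough userspace sketch of that idea (illustrative names, C11 stdatomic
instead of the kernel's atomic_long_t; not the patch itself), each counter
becomes a union whose plain member keeps the old field name while the aliased
atomic member serves the unlocked slow paths:

/* Userspace sketch of the per-field union (C11); the kernel's version
 * uses atomic_long_t and the NET_DEV_STAT()/DEV_STATS_*() macros below. */
#include <stdatomic.h>
#include <stdio.h>

#define STAT(FIELD)				\
	union {					\
		unsigned long FIELD;	/* legacy name, lock-protected paths */ \
		atomic_ulong __##FIELD;	/* lockless slow paths */		\
	}

struct demo_stats {
	STAT(rx_packets);
	STAT(rx_bytes);
};

#define STATS_INC(S, FIELD)	atomic_fetch_add(&(S)->__##FIELD, 1)
#define STATS_ADD(S, FIELD, V)	atomic_fetch_add(&(S)->__##FIELD, (V))

int main(void)
{
	struct demo_stats st = { 0 };

	STATS_INC(&st, rx_packets);	/* writer: atomic RMW, no lock needed */
	STATS_ADD(&st, rx_bytes, 1500);

	/* reader: the plain member aliases the atomic one (same size and
	 * representation), so existing "st.rx_packets" accesses still compile */
	printf("packets=%lu bytes=%lu\n", st.rx_packets, st.rx_bytes);
	return 0;
}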

netdev_stats_to_stats64() no longer needs an #if BITS_PER_LONG == 64 special case.

Note that the memcpy() we were using on 64-bit arches
had no provision to avoid load-tearing,
while atomic_long_read() provides the needed protection
at no cost.
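
A minimal illustration of the tearing point (userspace C11, not kernel code):
under a data race the compiler may legally split, repeat, or fuse a plain load,
whereas a relaxed atomic load, which is essentially what atomic_long_read()
performs, is one untearable access:

#include <stdatomic.h>

/* May be torn or re-loaded if another thread writes *p concurrently;
 * this is the class of access KCSAN keeps flagging. */
unsigned long plain_read(unsigned long *p)
{
	return *p;
}

/* Exactly one access, nothing more: the cheap guarantee the stats need. */
unsigned long untearable_read(atomic_ulong *p)
{
	return atomic_load_explicit(p, memory_order_relaxed);
}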

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Eric Dumazet, 2022-11-15 08:53:55 +00:00; committed by Greg Kroah-Hartman
parent 19d5a0e17a
commit 4bdcc6a5aa
3 changed files with 40 additions and 37 deletions

include/linux/netdevice.h

@@ -160,31 +160,38 @@ static inline bool dev_xmit_complete(int rc)
  *	(unsigned long) so they can be read and written atomically.
  */

+#define NET_DEV_STAT(FIELD)			\
+	union {					\
+		unsigned long FIELD;		\
+		atomic_long_t __##FIELD;	\
+	}
+
 struct net_device_stats {
-	unsigned long	rx_packets;
-	unsigned long	tx_packets;
-	unsigned long	rx_bytes;
-	unsigned long	tx_bytes;
-	unsigned long	rx_errors;
-	unsigned long	tx_errors;
-	unsigned long	rx_dropped;
-	unsigned long	tx_dropped;
-	unsigned long	multicast;
-	unsigned long	collisions;
-	unsigned long	rx_length_errors;
-	unsigned long	rx_over_errors;
-	unsigned long	rx_crc_errors;
-	unsigned long	rx_frame_errors;
-	unsigned long	rx_fifo_errors;
-	unsigned long	rx_missed_errors;
-	unsigned long	tx_aborted_errors;
-	unsigned long	tx_carrier_errors;
-	unsigned long	tx_fifo_errors;
-	unsigned long	tx_heartbeat_errors;
-	unsigned long	tx_window_errors;
-	unsigned long	rx_compressed;
-	unsigned long	tx_compressed;
+	NET_DEV_STAT(rx_packets);
+	NET_DEV_STAT(tx_packets);
+	NET_DEV_STAT(rx_bytes);
+	NET_DEV_STAT(tx_bytes);
+	NET_DEV_STAT(rx_errors);
+	NET_DEV_STAT(tx_errors);
+	NET_DEV_STAT(rx_dropped);
+	NET_DEV_STAT(tx_dropped);
+	NET_DEV_STAT(multicast);
+	NET_DEV_STAT(collisions);
+	NET_DEV_STAT(rx_length_errors);
+	NET_DEV_STAT(rx_over_errors);
+	NET_DEV_STAT(rx_crc_errors);
+	NET_DEV_STAT(rx_frame_errors);
+	NET_DEV_STAT(rx_fifo_errors);
+	NET_DEV_STAT(rx_missed_errors);
+	NET_DEV_STAT(tx_aborted_errors);
+	NET_DEV_STAT(tx_carrier_errors);
+	NET_DEV_STAT(tx_fifo_errors);
+	NET_DEV_STAT(tx_heartbeat_errors);
+	NET_DEV_STAT(tx_window_errors);
+	NET_DEV_STAT(rx_compressed);
+	NET_DEV_STAT(tx_compressed);
 };
+#undef NET_DEV_STAT

 #include <linux/cache.h>

@@ -4936,4 +4943,9 @@ do { \

 extern struct net_device *blackhole_netdev;

+/* Note: Avoid these macros in fast path, prefer per-cpu or per-queue counters. */
+#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
+#define DEV_STATS_ADD(DEV, FIELD, VAL) \
+		atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+
 #endif /* _LINUX_NETDEVICE_H */
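
For illustration, a hypothetical driver slow path converted to the new macros
might look like the following (foo_virt_rx and its policy are made up for this
example; only DEV_STATS_INC()/DEV_STATS_ADD() come from the patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical virtual-device receive path: not performance critical and
 * not under a lock, so plain dev->stats writes would race with other updaters. */
static void foo_virt_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* was: dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; */
	DEV_STATS_INC(dev, rx_packets);
	DEV_STATS_ADD(dev, rx_bytes, skb->len);

	if (netif_rx(skb) != NET_RX_SUCCESS)
		/* was: dev->stats.rx_dropped++; */
		DEV_STATS_INC(dev, rx_dropped);
}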

include/net/dst.h

@@ -357,9 +357,8 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
 static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
 				 struct net *net)
 {
-	/* TODO : stats should be SMP safe */
-	dev->stats.rx_packets++;
-	dev->stats.rx_bytes += skb->len;
+	DEV_STATS_INC(dev, rx_packets);
+	DEV_STATS_ADD(dev, rx_bytes, skb->len);
 	__skb_tunnel_rx(skb, dev, net);
 }

net/core/dev.c

@@ -9461,24 +9461,16 @@ void netdev_run_todo(void)
 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
 			     const struct net_device_stats *netdev_stats)
 {
-#if BITS_PER_LONG == 64
-	BUILD_BUG_ON(sizeof(*stats64) < sizeof(*netdev_stats));
-	memcpy(stats64, netdev_stats, sizeof(*netdev_stats));
-	/* zero out counters that only exist in rtnl_link_stats64 */
-	memset((char *)stats64 + sizeof(*netdev_stats), 0,
-	       sizeof(*stats64) - sizeof(*netdev_stats));
-#else
-	size_t i, n = sizeof(*netdev_stats) / sizeof(unsigned long);
-	const unsigned long *src = (const unsigned long *)netdev_stats;
+	size_t i, n = sizeof(*netdev_stats) / sizeof(atomic_long_t);
+	const atomic_long_t *src = (atomic_long_t *)netdev_stats;
 	u64 *dst = (u64 *)stats64;

 	BUILD_BUG_ON(n > sizeof(*stats64) / sizeof(u64));
 	for (i = 0; i < n; i++)
-		dst[i] = src[i];
+		dst[i] = atomic_long_read(&src[i]);
 	/* zero out counters that only exist in rtnl_link_stats64 */
 	memset((char *)stats64 + n * sizeof(u64), 0,
 	       sizeof(*stats64) - n * sizeof(u64));
-#endif
 }
 EXPORT_SYMBOL(netdev_stats_to_stats64);
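
A self-contained userspace sketch of the new copy loop (struct and field names
are illustrative, not the kernel's; like the kernel code it assumes the 64-bit
structure begins with the same counters in the same order):

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* stand-ins for struct net_device_stats / struct rtnl_link_stats64 */
struct dev_stats  { atomic_ulong rx_packets, tx_packets, rx_bytes, tx_bytes; };
struct link_stats { uint64_t rx_packets, tx_packets, rx_bytes, tx_bytes,
			     rx_nohandler, rx_otherhost_dropped; };

static void stats_to_stats64(struct link_stats *dst64, struct dev_stats *src)
{
	size_t i, n = sizeof(*src) / sizeof(atomic_ulong);
	atomic_ulong *src_arr = (atomic_ulong *)src;
	uint64_t *dst = (uint64_t *)dst64;

	/* one untorn load per counter; works for 32-bit and 64-bit longs,
	 * so no BITS_PER_LONG special case is needed */
	for (i = 0; i < n; i++)
		dst[i] = atomic_load_explicit(&src_arr[i], memory_order_relaxed);

	/* zero the counters that exist only in the larger structure */
	memset((char *)dst64 + n * sizeof(uint64_t), 0,
	       sizeof(*dst64) - n * sizeof(uint64_t));
}

int main(void)
{
	struct dev_stats st = { 0 };
	struct link_stats out;

	atomic_fetch_add(&st.rx_packets, 3);
	atomic_fetch_add(&st.rx_bytes, 4500);

	stats_to_stats64(&out, &st);
	printf("rx_packets=%" PRIu64 " rx_bytes=%" PRIu64 "\n",
	       out.rx_packets, out.rx_bytes);
	return 0;
}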