netpoll: allow cleanup to be synchronous
This fixes a problem introduced by:
commit 2cde6acd49
("netpoll: Fix __netpoll_rcu_free so that it can hold the rtnl lock")
When using netconsole on a bond, __netpoll_cleanup can asynchronously
recurse multiple times, each __netpoll_free_async call can result in
more __netpoll_free_async's. This means there is now a race between
cleanup_work queues on multiple netpoll_info's on multiple devices and
the configuration of a new netpoll. For example if a netconsole is set
to enable 0, reconfigured, and enable 1 immediately, this netconsole
will likely not work.
Given that the reason __netpoll_free_async exists is that it can be
called when rtnl is not locked, then if rtnl is locked we should be able
to execute the cleanup synchronously. It appears to be locked everywhere
it's called from.
Generalize the design pattern from the teaming driver for current
callers of __netpoll_free_async.
CC: Neil Horman <nhorman@tuxdriver.com>
CC: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Debabrata Banerjee <dbanerje@akamai.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
2e2d6f0342
commit
c9fbd71f73
@ -963,7 +963,8 @@ static inline void slave_disable_netpoll(struct slave *slave)
|
||||
return;
|
||||
|
||||
slave->np = NULL;
|
||||
__netpoll_free_async(np);
|
||||
|
||||
__netpoll_free(np);
|
||||
}
|
||||
|
||||
static void bond_poll_controller(struct net_device *bond_dev)
|
||||
|
@ -1077,7 +1077,7 @@ static void macvlan_dev_netpoll_cleanup(struct net_device *dev)
|
||||
|
||||
vlan->netpoll = NULL;
|
||||
|
||||
__netpoll_free_async(netpoll);
|
||||
__netpoll_free(netpoll);
|
||||
}
|
||||
#endif /* CONFIG_NET_POLL_CONTROLLER */
|
||||
|
||||
|
@ -1104,10 +1104,7 @@ static void team_port_disable_netpoll(struct team_port *port)
|
||||
return;
|
||||
port->np = NULL;
|
||||
|
||||
/* Wait for transmitting packets to finish before freeing. */
|
||||
synchronize_rcu_bh();
|
||||
__netpoll_cleanup(np);
|
||||
kfree(np);
|
||||
__netpoll_free(np);
|
||||
}
|
||||
#else
|
||||
static int team_port_enable_netpoll(struct team_port *port)
|
||||
|
@ -31,8 +31,6 @@ struct netpoll {
|
||||
bool ipv6;
|
||||
u16 local_port, remote_port;
|
||||
u8 remote_mac[ETH_ALEN];
|
||||
|
||||
struct work_struct cleanup_work;
|
||||
};
|
||||
|
||||
struct netpoll_info {
|
||||
@ -63,7 +61,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt);
|
||||
int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
|
||||
int netpoll_setup(struct netpoll *np);
|
||||
void __netpoll_cleanup(struct netpoll *np);
|
||||
void __netpoll_free_async(struct netpoll *np);
|
||||
void __netpoll_free(struct netpoll *np);
|
||||
void netpoll_cleanup(struct netpoll *np);
|
||||
void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
|
||||
struct net_device *dev);
|
||||
|
@ -756,8 +756,7 @@ static void vlan_dev_netpoll_cleanup(struct net_device *dev)
|
||||
return;
|
||||
|
||||
vlan->netpoll = NULL;
|
||||
|
||||
__netpoll_free_async(netpoll);
|
||||
__netpoll_free(netpoll);
|
||||
}
|
||||
#endif /* CONFIG_NET_POLL_CONTROLLER */
|
||||
|
||||
|
@ -344,7 +344,7 @@ void br_netpoll_disable(struct net_bridge_port *p)
|
||||
|
||||
p->np = NULL;
|
||||
|
||||
__netpoll_free_async(np);
|
||||
__netpoll_free(np);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -57,7 +57,6 @@ DEFINE_STATIC_SRCU(netpoll_srcu);
|
||||
MAX_UDP_CHUNK)
|
||||
|
||||
static void zap_completion_queue(void);
|
||||
static void netpoll_async_cleanup(struct work_struct *work);
|
||||
|
||||
static unsigned int carrier_timeout = 4;
|
||||
module_param(carrier_timeout, uint, 0644);
|
||||
@ -589,7 +588,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
|
||||
|
||||
np->dev = ndev;
|
||||
strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
|
||||
INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
|
||||
|
||||
if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
|
||||
np_err(np, "%s doesn't support polling, aborting\n",
|
||||
@ -788,10 +786,6 @@ void __netpoll_cleanup(struct netpoll *np)
|
||||
{
|
||||
struct netpoll_info *npinfo;
|
||||
|
||||
/* rtnl_dereference would be preferable here but
|
||||
* rcu_cleanup_netpoll path can put us in here safely without
|
||||
* holding the rtnl, so plain rcu_dereference it is
|
||||
*/
|
||||
npinfo = rtnl_dereference(np->dev->npinfo);
|
||||
if (!npinfo)
|
||||
return;
|
||||
@ -812,21 +806,16 @@ void __netpoll_cleanup(struct netpoll *np)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__netpoll_cleanup);
|
||||
|
||||
static void netpoll_async_cleanup(struct work_struct *work)
|
||||
void __netpoll_free(struct netpoll *np)
|
||||
{
|
||||
struct netpoll *np = container_of(work, struct netpoll, cleanup_work);
|
||||
ASSERT_RTNL();
|
||||
|
||||
rtnl_lock();
|
||||
/* Wait for transmitting packets to finish before freeing. */
|
||||
synchronize_rcu_bh();
|
||||
__netpoll_cleanup(np);
|
||||
rtnl_unlock();
|
||||
kfree(np);
|
||||
}
|
||||
|
||||
void __netpoll_free_async(struct netpoll *np)
|
||||
{
|
||||
schedule_work(&np->cleanup_work);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__netpoll_free_async);
|
||||
EXPORT_SYMBOL_GPL(__netpoll_free);
|
||||
|
||||
void netpoll_cleanup(struct netpoll *np)
|
||||
{
|
||||
|
@ -722,7 +722,7 @@ static void dsa_slave_netpoll_cleanup(struct net_device *dev)
|
||||
|
||||
p->netpoll = NULL;
|
||||
|
||||
__netpoll_free_async(netpoll);
|
||||
__netpoll_free(netpoll);
|
||||
}
|
||||
|
||||
static void dsa_slave_poll_controller(struct net_device *dev)
|
||||
|
[end of commit page]