net: sched: lock action when translating it to flow_action infra

[ Upstream commit 7a47281439ba00b11fc098f36695522184ce5a82 ]

In order to remove the dependency on the rtnl lock, take the action's
tcfa_lock when constructing its representation as a flow_action_entry
structure.
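
Condensed, the translation loop then follows the pattern sketched below
(an illustration of the cls_api.c change, not a verbatim excerpt; it
assumes the loop uses the existing tcf_exts_for_each_action() iterator,
as tc_setup_flow_action() does):

    tcf_exts_for_each_action(i, act, exts) {
            struct flow_action_entry *entry = &flow_action->entries[j];

            spin_lock_bh(&act->tcfa_lock);  /* serialize against action updates */
            /* ... translate act into *entry; on failure goto err_out_locked ... */
            spin_unlock_bh(&act->tcfa_lock);
    }

This is also why 'act' loses its const qualifier in
tc_setup_flow_action(): spin_lock_bh() needs a mutable lock.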

Refactor tcf_sample_get_group() to assume that the caller holds tcf_lock
instead of taking it itself. This callback is only called from the
flow_action infra representation translator, which now invokes it with
tcf_lock held, so the refactoring is necessary to prevent a deadlock.
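
Sketched as a call chain, the self-deadlock the refactoring avoids
(tcf_lock is an alias for the same per-action tcfa_lock, via
"#define tcf_lock common.tcfa_lock" in include/net/act_api.h; the
callback is reached through the action's get_psample_group op):

    /*
     * tc_setup_flow_action()
     *   spin_lock_bh(&act->tcfa_lock);     <- translator takes the lock
     *   -> act->ops->get_psample_group()   == tcf_sample_get_group()
     *        spin_lock_bh(&s->tcf_lock);   <- same lock: self-deadlock
     */

The callback therefore only documents its locking requirement through
rcu_dereference_protected(..., lockdep_is_held(&s->tcf_lock)).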

Allocate memory with the GFP_ATOMIC flag for the ip_tunnel_info copy,
because tcf_tunnel_info_copy() is only called from the flow_action
representation infra code with the tcf_lock spinlock held.
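
A minimal sketch of the constraint that forces this change: code holding
a spinlock runs in atomic context, so the allocator must not sleep.

    spin_lock_bh(&act->tcfa_lock);
    /* GFP_KERNEL may sleep to reclaim memory -> illegal under a spinlock;
     * GFP_ATOMIC never sleeps but may fail, so callers must handle a
     * NULL return (tcf_tunnel_encap_get_tunnel() returns -ENOMEM then).
     */
    tun_copy = kmemdup(tun, tun_size, GFP_ATOMIC);
    spin_unlock_bh(&act->tcfa_lock);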

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 include/net/tc_act/tc_tunnel_key.h |  2 +-
 net/sched/act_sample.c             |  2 --
 net/sched/cls_api.c                | 17 +++++++++++------
 3 files changed, 12 insertions(+), 9 deletions(-)

--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -69,7 +69,7 @@ tcf_tunnel_info_copy(const struct tc_action *a)
 	if (tun) {
 		size_t tun_size = sizeof(*tun) + tun->options_len;
 		struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size,
-							   GFP_KERNEL);
+							   GFP_ATOMIC);
 
 		return tun_copy;
 	}

--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -265,14 +265,12 @@ tcf_sample_get_group(const struct tc_action *a,
 	struct tcf_sample *s = to_sample(a);
 	struct psample_group *group;
 
-	spin_lock_bh(&s->tcf_lock);
 	group = rcu_dereference_protected(s->psample_group,
 					  lockdep_is_held(&s->tcf_lock));
 	if (group) {
 		psample_group_take(group);
 		*destructor = tcf_psample_group_put;
 	}
-	spin_unlock_bh(&s->tcf_lock);
 
 	return group;
 }

--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3436,7 +3436,7 @@ static void tcf_sample_get_group(struct flow_action_entry *entry,
 int tc_setup_flow_action(struct flow_action *flow_action,
 			 const struct tcf_exts *exts, bool rtnl_held)
 {
-	const struct tc_action *act;
+	struct tc_action *act;
 	int i, j, k, err = 0;
 
 	if (!exts)
@@ -3450,6 +3450,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 		struct flow_action_entry *entry;
 
 		entry = &flow_action->entries[j];
+		spin_lock_bh(&act->tcfa_lock);
 		if (is_tcf_gact_ok(act)) {
 			entry->id = FLOW_ACTION_ACCEPT;
 		} else if (is_tcf_gact_shot(act)) {
@@ -3490,13 +3491,13 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 				break;
 			default:
 				err = -EOPNOTSUPP;
-				goto err_out;
+				goto err_out_locked;
 			}
 		} else if (is_tcf_tunnel_set(act)) {
 			entry->id = FLOW_ACTION_TUNNEL_ENCAP;
 			err = tcf_tunnel_encap_get_tunnel(entry, act);
 			if (err)
-				goto err_out;
+				goto err_out_locked;
 		} else if (is_tcf_tunnel_release(act)) {
 			entry->id = FLOW_ACTION_TUNNEL_DECAP;
 		} else if (is_tcf_pedit(act)) {
@@ -3510,7 +3511,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 				break;
 			default:
 				err = -EOPNOTSUPP;
-				goto err_out;
+				goto err_out_locked;
 			}
 			entry->mangle.htype = tcf_pedit_htype(act, k);
 			entry->mangle.mask = tcf_pedit_mask(act, k);
@@ -3561,15 +3562,16 @@ int tc_setup_flow_action(struct flow_action *flow_action,
 				entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
 				break;
 			default:
-				goto err_out;
+				goto err_out_locked;
 			}
 		} else if (is_tcf_skbedit_ptype(act)) {
 			entry->id = FLOW_ACTION_PTYPE;
 			entry->ptype = tcf_skbedit_ptype(act);
 		} else {
 			err = -EOPNOTSUPP;
-			goto err_out;
+			goto err_out_locked;
 		}
+		spin_unlock_bh(&act->tcfa_lock);
 
 		if (!is_tcf_pedit(act))
 			j++;
@@ -3583,6 +3585,9 @@ err_out:
 	tc_cleanup_flow_action(flow_action);
 
 	return err;
+err_out_locked:
+	spin_unlock_bh(&act->tcfa_lock);
+	goto err_out;
 }
 EXPORT_SYMBOL(tc_setup_flow_action);