android_kernel_xiaomi_sm8350/net/bridge/br_forward.c
Herbert Xu 4906f9985e bridge: Fix LRO crash with tun
> Kernel BUG at drivers/net/tun.c:444
> invalid opcode: 0000 [1] SMP
> last sysfs file: /class/net/lo/ifindex
> CPU 0
> Modules linked in: tun ipt_MASQUERADE iptable_nat ip_nat xt_state ip_conntrack
> nfnetlink ipt_REJECT xt_tcpudp iptable_filter d
> Pid: 6912, comm: qemu-kvm Tainted: G      2.6.18-128.el5 #1
> RIP: 0010:[<ffffffff886f57b0>]  [<ffffffff886f57b0>]
> :tun:tun_chr_readv+0x2b1/0x3a6
> RSP: 0018:ffff8102202c5e48  EFLAGS: 00010246
> RAX: 0000000000000000 RBX: ffff8102202c5e98 RCX: 0000000004010000
> RDX: ffff810227063680 RSI: ffff8102202c5e9e RDI: ffff8102202c5e92
> RBP: 0000000000010ff6 R08: 0000000000000000 R09: 0000000000000001
> R10: ffff8102202c5e94 R11: 0000000000000202 R12: ffff8102275357c0
> R13: ffff81022755e500 R14: 0000000000000000 R15: ffff8102202c5ef8
> FS:  00002ae4398db980(0000) GS:ffffffff803ac000(0000) knlGS:0000000000000000
> CS:  0010 DS: 0000 ES: 0000 CR0: 000000008005003b
> CR2: 00002ae4ab514000 CR3: 0000000221344000 CR4: 00000000000026e0
> Process qemu-kvm (pid: 6912, threadinfo ffff8102202c4000, task
> ffff81022e58d820)
> Stack:  00000000498735cb ffff810229d1a3c0 0000000000000000 ffff81022e58d820
>  ffffffff8008a461 ffff81022755e528 ffff81022755e528 ffffffff8009f925
>  000005ea05ea0000 ffff8102209d0000 00001051143e1600 ffffffff8003c00e
> Call Trace:
>  [<ffffffff8008a461>] default_wake_function+0x0/0xe
>  [<ffffffff8009f925>] enqueue_hrtimer+0x55/0x70
>  [<ffffffff8003c00e>] hrtimer_start+0xbc/0xce
>  [<ffffffff886f58bf>] :tun:tun_chr_read+0x1a/0x1f
>  [<ffffffff8000b3f3>] vfs_read+0xcb/0x171
>  [<ffffffff800117d4>] sys_read+0x45/0x6e
>  [<ffffffff8005d116>] system_call+0x7e/0x83
>
>
> Code: 0f 0b 68 40 62 6f 88 c2 bc 01 f6 42 0a 08 74 0c 80 4c 24 41
> RIP  [<ffffffff886f57b0>] :tun:tun_chr_readv+0x2b1/0x3a6
>  RSP <ffff8102202c5e48>
>  <0>Kernel panic - not syncing: Fatal exception

This crashed when an LRO packet generated by bnx2x reached a
tun device through the bridge.  We're supposed to drop it at
the bridge.  However, because the check was placed in br_forward
instead of __br_forward, it's only effective if we are sending
the packet through a single port.

This patch fixes it by moving the check into __br_forward.
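
With the check in __br_forward(), the flooded path (br_flood_forward
-> br_flood -> __br_forward, once per cloned copy) now drops LRO skbs
as well, not just the single-port path.  The relevant function, as it
stands after the change (elided):

	static void __br_forward(const struct net_bridge_port *to,
				 struct sk_buff *skb)
	{
		if (skb_warn_if_lro(skb)) {
			kfree_skb(skb);
			return;
		}
		...
	}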

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
2009-02-09 15:07:18 -08:00

/*
 *	Forwarding decision
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/netfilter_bridge.h>
#include "br_private.h"

/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	return (skb->dev != p->dev && p->state == BR_STATE_FORWARDING);
}
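
/* Frame length used for the MTU check in br_dev_queue_push_xmit();
 * an 802.1Q tag does not count against the device MTU, so subtract
 * it when present.
 */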
static inline unsigned packet_length(const struct sk_buff *skb)
{
	return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
}

int br_dev_queue_push_xmit(struct sk_buff *skb)
{
	/* drop mtu oversized packets except gso */
	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
		kfree_skb(skb);
	else {
		/* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
		if (nf_bridge_maybe_copy_header(skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			dev_queue_xmit(skb);
		}
	}

	return 0;
}

int br_forward_finish(struct sk_buff *skb)
{
	return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
		       br_dev_queue_push_xmit);
}
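
/* Deliver a locally originated packet out one port; runs the
 * NF_BR_LOCAL_OUT hook before br_forward_finish().
 */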
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	skb->dev = to->dev;
	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
		br_forward_finish);
}
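
/* Forward a packet received on another port out through @to.  The
 * LRO check is done here, rather than in br_forward(), so that the
 * flooded copies sent via br_flood_forward() hit it as well.
 */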
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		br_forward_finish);
}

/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_deliver(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (should_deliver(to, skb)) {
		__br_forward(to, skb);
		return;
	}

	kfree_skb(skb);
}

/* called under bridge lock */
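/* Each port that passes should_deliver() except the last gets a
 * clone; the last one gets the original skb, so no copy is made in
 * the single-port case.
 */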
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		if (should_deliver(p, skb)) {
			if (prev != NULL) {
				struct sk_buff *skb2;

				if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
					br->dev->stats.tx_dropped++;
					kfree_skb(skb);
					return;
				}

				__packet_hook(prev, skb2);
			}

			prev = p;
		}
	}

	if (prev != NULL) {
		__packet_hook(prev, skb);
		return;
	}

	kfree_skb(skb);
}

/* called with rcu_read_lock */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, __br_deliver);
}

/* called under bridge lock */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, __br_forward);
}