6edafaaf6f
If the following packet flow happen, kernel will panic. MathineA MathineB SYN ----------------------> SYN+ACK <---------------------- ACK(bad seq) ----------------------> When a bad seq ACK is received, tcp_v4_md5_do_lookup(skb->sk, ip_hdr(skb)->daddr)) is finally called by tcp_v4_reqsk_send_ack(), but the first parameter(skb->sk) is NULL at that moment, so kernel panic happens. This patch fixes this bug. OOPS output is as following: [ 302.812793] IP: [<c05cfaa6>] tcp_v4_md5_do_lookup+0x12/0x42 [ 302.817075] Oops: 0000 [#1] SMP [ 302.819815] Modules linked in: ipv6 loop dm_multipath rtc_cmos rtc_core rtc_lib pcspkr pcnet32 mii i2c_piix4 parport_pc i2c_core parport ac button ata_piix libata dm_mod mptspi mptscsih mptbase scsi_transport_spi sd_mod scsi_mod crc_t10dif ext3 jbd mbcache uhci_hcd ohci_hcd ehci_hcd [last unloaded: scsi_wait_scan] [ 302.849946] [ 302.851198] Pid: 0, comm: swapper Not tainted (2.6.27-rc1-guijf #5) [ 302.855184] EIP: 0060:[<c05cfaa6>] EFLAGS: 00010296 CPU: 0 [ 302.858296] EIP is at tcp_v4_md5_do_lookup+0x12/0x42 [ 302.861027] EAX: 0000001e EBX: 00000000 ECX: 00000046 EDX: 00000046 [ 302.864867] ESI: ceb69e00 EDI: 1467a8c0 EBP: cf75f180 ESP: c0792e54 [ 302.868333] DS: 007b ES: 007b FS: 00d8 GS: 0000 SS: 0068 [ 302.871287] Process swapper (pid: 0, ti=c0792000 task=c0712340 task.ti=c0746000) [ 302.875592] Stack: c06f413a 00000000 cf75f180 ceb69e00 00000000 c05d0d86 000016d0 ceac5400 [ 302.883275] c05d28f8 000016d0 ceb69e00 ceb69e20 681bf6e3 00001000 00000000 0a67a8c0 [ 302.890971] ceac5400 c04250a3 c06f413a c0792eb0 c0792edc cf59a620 cf59a620 cf59a634 [ 302.900140] Call Trace: [ 302.902392] [<c05d0d86>] tcp_v4_reqsk_send_ack+0x17/0x35 [ 302.907060] [<c05d28f8>] tcp_check_req+0x156/0x372 [ 302.910082] [<c04250a3>] printk+0x14/0x18 [ 302.912868] [<c05d0aa1>] tcp_v4_do_rcv+0x1d3/0x2bf [ 302.917423] [<c05d26be>] tcp_v4_rcv+0x563/0x5b9 [ 302.920453] [<c05bb20f>] ip_local_deliver_finish+0xe8/0x183 [ 302.923865] [<c05bb10a>] ip_rcv_finish+0x286/0x2a3 
[ 302.928569] [<c059e438>] dev_alloc_skb+0x11/0x25 [ 302.931563] [<c05a211f>] netif_receive_skb+0x2d6/0x33a [ 302.934914] [<d0917941>] pcnet32_poll+0x333/0x680 [pcnet32] [ 302.938735] [<c05a3b48>] net_rx_action+0x5c/0xfe [ 302.941792] [<c042856b>] __do_softirq+0x5d/0xc1 [ 302.944788] [<c042850e>] __do_softirq+0x0/0xc1 [ 302.948999] [<c040564b>] do_softirq+0x55/0x88 [ 302.951870] [<c04501b1>] handle_fasteoi_irq+0x0/0xa4 [ 302.954986] [<c04284da>] irq_exit+0x35/0x69 [ 302.959081] [<c0405717>] do_IRQ+0x99/0xae [ 302.961896] [<c040422b>] common_interrupt+0x23/0x28 [ 302.966279] [<c040819d>] default_idle+0x2a/0x3d [ 302.969212] [<c0402552>] cpu_idle+0xb2/0xd2 [ 302.972169] ======================= [ 302.974274] Code: fc ff 84 d2 0f 84 df fd ff ff e9 34 fe ff ff 83 c4 0c 5b 5e 5f 5d c3 90 90 57 89 d7 56 53 89 c3 50 68 3a 41 6f c0 e8 e9 55 e5 ff <8b> 93 9c 04 00 00 58 85 d2 59 74 1e 8b 72 10 31 db 31 c9 85 f6 [ 303.011610] EIP: [<c05cfaa6>] tcp_v4_md5_do_lookup+0x12/0x42 SS:ESP 0068:c0792e54 [ 303.018360] Kernel panic - not syncing: Fatal exception in interrupt Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com> Signed-off-by: David S. Miller <davem@davemloft.net>
318 lines
9.0 KiB
C
318 lines
9.0 KiB
C
/*
|
|
* net/dccp/minisocks.c
|
|
*
|
|
* An implementation of the DCCP protocol
|
|
* Arnaldo Carvalho de Melo <acme@conectiva.com.br>
|
|
*
|
|
* This program is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU General Public License
|
|
* as published by the Free Software Foundation; either version
|
|
* 2 of the License, or (at your option) any later version.
|
|
*/
|
|
|
|
#include <linux/dccp.h>
|
|
#include <linux/kernel.h>
|
|
#include <linux/skbuff.h>
|
|
#include <linux/timer.h>
|
|
|
|
#include <net/sock.h>
|
|
#include <net/xfrm.h>
|
|
#include <net/inet_timewait_sock.h>
|
|
|
|
#include "ackvec.h"
|
|
#include "ccid.h"
|
|
#include "dccp.h"
|
|
#include "feat.h"
|
|
|
|
/*
 * Shared TIME_WAIT bookkeeping ("death row") for DCCP sockets, driven by
 * the generic inet_timewait machinery: the hangman timer and kill work
 * reap long-lived buckets, the calendar timer handles short timeouts.
 */
struct inet_timewait_death_row dccp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period = DCCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock = __SPIN_LOCK_UNLOCKED(dccp_death_row.death_lock),
	.hashinfo = &dccp_hashinfo,
	.tw_timer = TIMER_INITIALIZER(inet_twdr_hangman, 0,
				      (unsigned long)&dccp_death_row),
	.twkill_work = __WORK_INITIALIZER(dccp_death_row.twkill_work,
					  inet_twdr_twkill_work),
	/* Short-time timewait calendar */

	.twcal_hand = -1,
	.twcal_timer = TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					 (unsigned long)&dccp_death_row),
};

EXPORT_SYMBOL_GPL(dccp_death_row);
|
|
|
|
void dccp_minisock_init(struct dccp_minisock *dmsk)
|
|
{
|
|
dmsk->dccpms_sequence_window = sysctl_dccp_feat_sequence_window;
|
|
dmsk->dccpms_rx_ccid = sysctl_dccp_feat_rx_ccid;
|
|
dmsk->dccpms_tx_ccid = sysctl_dccp_feat_tx_ccid;
|
|
dmsk->dccpms_ack_ratio = sysctl_dccp_feat_ack_ratio;
|
|
dmsk->dccpms_send_ack_vector = sysctl_dccp_feat_send_ack_vector;
|
|
dmsk->dccpms_send_ndp_count = sysctl_dccp_feat_send_ndp_count;
|
|
}
|
|
|
|
/*
 * dccp_time_wait  -  move @sk into a timewait mini-socket
 * @sk:    full socket being retired
 * @state: timewait sub-state to record in the bucket
 * @timeo: requested timeout in jiffies (may be raised, see below)
 *
 * Allocates an inet_timewait_sock that replaces @sk in the hash tables,
 * schedules its expiry on dccp_death_row, and finally releases @sk via
 * dccp_done().  If the bucket table is full, @sk is closed non-gracefully.
 */
void dccp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;

	/* Only allocate a bucket while below the sysctl limit. */
	if (dccp_death_row.tw_count < dccp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* 3.5 * RTO: (RTO << 2) - (RTO >> 1) = 4*RTO - RTO/2 */
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			const struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			/* Copy the IPv6 addressing state into the bucket. */
			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif
		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &dccp_hashinfo);

		/* Get the TIME_WAIT timeout firing: never less than 3.5*RTO. */
		if (timeo < rto)
			timeo = rto;

		tw->tw_timeout = DCCP_TIMEWAIT_LEN;
		/* A real TIME_WAIT always lasts the full period. */
		if (state == DCCP_TIME_WAIT)
			timeo = DCCP_TIMEWAIT_LEN;

		inet_twsk_schedule(tw, &dccp_death_row, timeo,
				   DCCP_TIMEWAIT_LEN);
		/* Drop the reference obtained from inet_twsk_alloc(). */
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		DCCP_WARN("time wait bucket table overflow\n");
	}

	dccp_done(sk);
}
|
|
|
|
/*
 * dccp_create_openreq_child  -  set up the child socket for a passive open
 * @sk:  the listening socket being cloned
 * @req: the connection request carrying the per-connection parameters
 * @skb: the packet that triggered child creation
 *
 * Clones @sk and initialises the DCCP-specific state of the child from
 * @req: role, service, timestamps, feature set, optional ack vector,
 * CCIDs and the initial sequence-number window.  Returns the new socket,
 * or NULL if the clone or any of the allocations fail.
 */
struct sock *dccp_create_openreq_child(struct sock *sk,
				       const struct request_sock *req,
				       const struct sk_buff *skb)
{
	/*
	 * Step 3: Process LISTEN state
	 *
	 *   (* Generate a new socket and switch to that socket *)
	 *   Set S := new socket for this port pair
	 */
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct dccp_request_sock *dreq = dccp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct dccp_sock *newdp = dccp_sk(newsk);
		struct dccp_minisock *newdmsk = dccp_msk(newsk);

		newdp->dccps_role = DCCP_ROLE_SERVER;
		newdp->dccps_hc_rx_ackvec = NULL;
		newdp->dccps_service_list = NULL;
		newdp->dccps_service = dreq->dreq_service;
		newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
		newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
		newicsk->icsk_rto = DCCP_TIMEOUT_INIT;

		/* Copy the feature-negotiation state from the listener. */
		if (dccp_feat_clone(sk, newsk))
			goto out_free;

		/* An RX ack vector is only kept when the feature is on. */
		if (newdmsk->dccpms_send_ack_vector) {
			newdp->dccps_hc_rx_ackvec =
				dccp_ackvec_alloc(GFP_ATOMIC);
			if (unlikely(newdp->dccps_hc_rx_ackvec == NULL))
				goto out_free;
		}

		/* Instantiate the negotiated RX/TX CCID modules. */
		newdp->dccps_hc_rx_ccid =
			ccid_hc_rx_new(newdmsk->dccpms_rx_ccid,
				       newsk, GFP_ATOMIC);
		newdp->dccps_hc_tx_ccid =
			ccid_hc_tx_new(newdmsk->dccpms_tx_ccid,
				       newsk, GFP_ATOMIC);
		if (unlikely(newdp->dccps_hc_rx_ccid == NULL ||
			     newdp->dccps_hc_tx_ccid == NULL)) {
			/* ackvec/ccid free helpers tolerate NULL arguments */
			dccp_ackvec_free(newdp->dccps_hc_rx_ackvec);
			ccid_hc_rx_delete(newdp->dccps_hc_rx_ccid, newsk);
			ccid_hc_tx_delete(newdp->dccps_hc_tx_ccid, newsk);
out_free:
			/* It is still raw copy of parent, so invalidate
			 * destructor and make plain sk_free() */
			newsk->sk_destruct = NULL;
			sk_free(newsk);
			return NULL;
		}

		/*
		 * Step 3: Process LISTEN state
		 *
		 *   Choose S.ISS (initial seqno) or set from Init Cookies
		 *   Initialize S.GAR := S.ISS
		 *   Set S.ISR, S.GSR, S.SWL, S.SWH from packet or Init Cookies
		 */

		/* See dccp_v4_conn_request */
		newdmsk->dccpms_sequence_window = req->rcv_wnd;

		newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss;
		dccp_update_gss(newsk, dreq->dreq_iss);

		newdp->dccps_isr = dreq->dreq_isr;
		dccp_update_gsr(newsk, dreq->dreq_isr);

		/*
		 * SWL and AWL are initially adjusted so that they are not
		 * less than the initial Sequence Numbers received and sent,
		 * respectively:
		 *	SWL := max(GSR + 1 - floor(W/4), ISR),
		 *	AWL := max(GSS - W' + 1, ISS).
		 * These adjustments MUST be applied only at the beginning of
		 * the connection.
		 */
		dccp_set_seqno(&newdp->dccps_swl,
			       max48(newdp->dccps_swl, newdp->dccps_isr));
		dccp_set_seqno(&newdp->dccps_awl,
			       max48(newdp->dccps_awl, newdp->dccps_iss));

		dccp_init_xmit_timers(newsk);

		DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(dccp_create_openreq_child);
|
|
|
|
/*
 * Process an incoming packet for RESPOND sockets represented
 * as a request_sock.
 *
 * Returns the newly created child socket on a valid (Data)Ack, or NULL
 * when the packet is a duplicate REQUEST or had to be dropped.
 */
struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
			    struct request_sock *req,
			    struct request_sock **prev)
{
	struct sock *child = NULL;
	struct dccp_request_sock *dreq = dccp_rsk(req);

	/* Check for retransmitted REQUEST */
	if (dccp_hdr(skb)->dccph_type == DCCP_PKT_REQUEST) {

		if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_isr)) {
			dccp_pr_debug("Retransmitted REQUEST\n");
			/* Track the highest REQUEST seqno seen so far. */
			dreq->dreq_isr = DCCP_SKB_CB(skb)->dccpd_seq;
			/*
			 * Send another RESPONSE packet
			 * To protect against Request floods, increment retrans
			 * counter (backoff, monitored by dccp_response_timer).
			 */
			req->retrans++;
			req->rsk_ops->rtx_syn_ack(sk, req);
		}
		/* Network Duplicate, discard packet */
		return NULL;
	}

	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_PACKET_ERROR;

	/* Only an Ack or DataAck is acceptable here; anything else drops. */
	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_ACK &&
	    dccp_hdr(skb)->dccph_type != DCCP_PKT_DATAACK)
		goto drop;

	/* Invalid ACK: must acknowledge exactly our initial seqno (ISS). */
	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != dreq->dreq_iss) {
		dccp_pr_debug("Invalid ACK number: ack_seq=%llu, "
			      "dreq_iss=%llu\n",
			      (unsigned long long)
			      DCCP_SKB_CB(skb)->dccpd_ack_seq,
			      (unsigned long long) dreq->dreq_iss);
		goto drop;
	}

	if (dccp_parse_options(sk, dreq, skb))
		goto drop;

	/* Create the full child socket (address-family specific). */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	/* Move the request from the SYN queue to the accept queue. */
	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);
	inet_csk_reqsk_queue_add(sk, req, child);
out:
	return child;
listen_overflow:
	dccp_pr_debug("listen_overflow!\n");
	DCCP_SKB_CB(skb)->dccpd_reset_code = DCCP_RESET_CODE_TOO_BUSY;
drop:
	/* Answer bad packets with a Reset, unless the packet was a Reset. */
	if (dccp_hdr(skb)->dccph_type != DCCP_PKT_RESET)
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	goto out;
}

EXPORT_SYMBOL_GPL(dccp_check_req);
|
|
|
|
/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * Returns the result of dccp_rcv_state_process(), or 0 when the skb was
 * merely queued on the child's backlog.
 */
int dccp_child_process(struct sock *parent, struct sock *child,
		       struct sk_buff *skb)
{
	int ret = 0;
	/* Snapshot the state so a transition out of RESPOND is visible. */
	const int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = dccp_rcv_state_process(child, skb, dccp_hdr(skb),
					     skb->len);

		/* Wakeup parent, send SIGIO */
		if (state == DCCP_RESPOND && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		sk_add_backlog(child, skb);
	}

	/* NOTE(review): assumes the caller bh-locked @child and holds a
	 * reference on it — both are released here; verify at call sites. */
	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL_GPL(dccp_child_process);
|
|
|
|
/*
 * Stub for the request_sock ->send_ack hook.  Per the DCCP_BUG message,
 * DCCP never sends ACKs from the LISTEN/RESPOND states, so reaching this
 * function indicates a bug; @sk, @skb and @rsk exist only to satisfy the
 * reqsk_ops callback signature.
 */
void dccp_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
			 struct request_sock *rsk)
{
	DCCP_BUG("DCCP-ACK packets are never sent in LISTEN/RESPOND state");
}

EXPORT_SYMBOL_GPL(dccp_reqsk_send_ack);
|
|
|
|
void dccp_reqsk_init(struct request_sock *req, struct sk_buff *skb)
|
|
{
|
|
struct dccp_request_sock *dreq = dccp_rsk(req);
|
|
|
|
inet_rsk(req)->rmt_port = dccp_hdr(skb)->dccph_sport;
|
|
inet_rsk(req)->acked = 0;
|
|
req->rcv_wnd = sysctl_dccp_feat_sequence_window;
|
|
dreq->dreq_timestamp_echo = 0;
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(dccp_reqsk_init);
|