android_kernel_xiaomi_sm8350/include/net/inet6_hashtables.h
Eric Dumazet 81c3d5470e [INET]: speedup inet (tcp/dccp) lookups
Arnaldo and I agreed it could be applied now, because I have other
pending patches depending on this one (Thank you Arnaldo)

(The other important patch moves skc_refcnt into a separate cache line,
so that SMP/NUMA performance doesn't suffer from cache line ping-pongs.)

1) First, some performance data:
--------------------------------

tcp_v4_rcv() wastes a *lot* of time in __inet_lookup_established()

The most time-critical code is:

sk_for_each(sk, node, &head->chain) {
     if (INET_MATCH(sk, acookie, saddr, daddr, ports, dif))
         goto hit; /* You sunk my battleship! */
}

The sk_for_each() loop does use prefetch() hints, but only the beginning of
"struct sock" is prefetched.

As the first comparison in INET_MATCH uses inet_sk(__sk)->daddr, which is far
away from the beginning of "struct sock", it has to bring a cold cache line
into the CPU cache. Each iteration therefore touches at least two cache
lines.

This can be problematic if some chains are very long.
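
For reference, before this patch INET_MATCH had no cheap first test: its very
first comparison already dereferenced inet_sk(__sk)->daddr. A sketch of the
old 64-bit form (essentially the new macro from section 3 below, minus the
sk_hash line):

#define INET_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
     (((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie))  &&  \
     ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))    &&  \
     (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))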

2) The goal
-----------

The idea I had is to change things so that INET_MATCH() can return
FALSE in 99% of cases using only data already in the CPU cache, i.e.
one cache line per iteration.
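
With the patch applied, the hot loop compares the precomputed hash first, so
a non-matching socket is usually rejected while touching only the cache line
that holds sock_common (where sk_hash lives). A minimal sketch of the
post-patch fast path (same loop as in section 1, with the hash passed in):

sk_for_each(sk, node, &head->chain) {
     if (INET_MATCH(sk, hash, acookie, saddr, daddr, ports, dif))
         goto hit; /* You sunk my battleship! */
}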

3) Description of the patch
---------------------------

Adds a new 'unsigned int skc_hash' field in 'struct sock_common',
filling a 32-bit hole on 64-bit platforms.

struct sock_common {
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_node;
	struct hlist_node	skc_bind_node;
	atomic_t		skc_refcnt;
+	unsigned int		skc_hash;
	struct proto		*skc_prot;
};

Store in this 32-bit field the full hash, not masked by (ehash_size - 1).
Using this full hash as the first comparison done in INET_MATCH permits
us to immediately skip the element without touching a second cache line
in case of a miss.

Suppress the sk_hashent/tw_hashent fields, since skc_hash (aliased to
sk_hash and tw_hash) already contains the slot number if we mask it with
(ehash_size - 1).
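
For example, the bucket can then be recovered from the stored full hash with
a simple mask, roughly as the inet_ehash_bucket() helper does (sketch,
assuming ehash_size is a power of two, as it is for the established table):

static inline struct inet_ehash_bucket *inet_ehash_bucket(
	struct inet_hashinfo *hashinfo, unsigned int hash)
{
	return &hashinfo->ehash[hash & (hashinfo->ehash_size - 1)];
}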

File include/net/inet_hashtables.h

64-bit platforms:
#define INET_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
     (((__sk)->sk_hash == (__hash))                          &&  \
     ((*((__u64 *)&(inet_sk(__sk)->daddr))) == (__cookie))   &&  \
     ((*((__u32 *)&(inet_sk(__sk)->dport))) == (__ports))   &&  \
     (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))

32-bit platforms:
#define TCP_IPV4_MATCH(__sk, __hash, __cookie, __saddr, __daddr, __ports, __dif)\
     (((__sk)->sk_hash == (__hash))                 &&  \
     (inet_sk(__sk)->daddr          == (__saddr))   &&  \
     (inet_sk(__sk)->rcv_saddr      == (__daddr))   &&  \
     (!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))


- Adds a prefetch(head->chain.first) in
__inet_lookup_established()/__tcp_v4_check_established(),
__inet6_lookup_established()/__tcp_v6_check_established() and
__dccp_v4_check_established() to bring the first element of the
list into cache before the {read|write}_lock(&head->lock).
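
The pattern (visible in __inet6_lookup_established() in the header below) is
simply to issue the prefetch before taking the bucket lock, so that the cache
miss on the first chain element overlaps with lock acquisition:

struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);

prefetch(head->chain.first);
read_lock(&head->lock);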

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Acked-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
2005-10-03 14:13:38 -07:00

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Authors:	Lotsa people, from code originally in tcp
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#ifndef _INET6_HASHTABLES_H
#define _INET6_HASHTABLES_H
#include <linux/config.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/ipv6.h>
struct inet_hashinfo;
/* I have no idea if this is a good hash for v6 or not. -DaveM */
static inline unsigned int inet6_ehashfn(const struct in6_addr *laddr, const u16 lport,
					 const struct in6_addr *faddr, const u16 fport)
{
	unsigned int hashent = (lport ^ fport);

	hashent ^= (laddr->s6_addr32[3] ^ faddr->s6_addr32[3]);
	hashent ^= hashent >> 16;
	hashent ^= hashent >> 8;
	return hashent;
}

static inline int inet6_sk_ehashfn(const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ipv6_pinfo *np = inet6_sk(sk);
	const struct in6_addr *laddr = &np->rcv_saddr;
	const struct in6_addr *faddr = &np->daddr;
	const __u16 lport = inet->num;
	const __u16 fport = inet->dport;

	return inet6_ehashfn(laddr, lport, faddr, fport);
}

/*
 * Sockets in TCP_CLOSE state are _always_ taken out of the hash, so
 * we need not check it for TCP lookups anymore, thanks Alexey. -DaveM
 *
 * The sockhash lock must be held as a reader here.
 */
static inline struct sock *
__inet6_lookup_established(struct inet_hashinfo *hashinfo,
			   const struct in6_addr *saddr,
			   const u16 sport,
			   const struct in6_addr *daddr,
			   const u16 hnum,
			   const int dif)
{
	struct sock *sk;
	const struct hlist_node *node;
	const __u32 ports = INET_COMBINED_PORTS(sport, hnum);
	/* Optimize here for direct hit, only listening connections can
	 * have wildcards anyways.
	 */
	unsigned int hash = inet6_ehashfn(daddr, hnum, saddr, sport);
	struct inet_ehash_bucket *head = inet_ehash_bucket(hashinfo, hash);

	prefetch(head->chain.first);
	read_lock(&head->lock);
	sk_for_each(sk, node, &head->chain) {
		/* For IPV6 do the cheaper port and family tests first. */
		if (INET6_MATCH(sk, hash, saddr, daddr, ports, dif))
			goto hit; /* You sunk my battleship! */
	}
	/* Must check for a TIME_WAIT'er before going to listener hash. */
	sk_for_each(sk, node, &(head + hashinfo->ehash_size)->chain) {
		const struct inet_timewait_sock *tw = inet_twsk(sk);

		if(*((__u32 *)&(tw->tw_dport)) == ports &&
		   sk->sk_family == PF_INET6) {
			const struct tcp6_timewait_sock *tcp6tw = tcp6_twsk(sk);

			if (ipv6_addr_equal(&tcp6tw->tw_v6_daddr, saddr)	&&
			    ipv6_addr_equal(&tcp6tw->tw_v6_rcv_saddr, daddr)	&&
			    (!sk->sk_bound_dev_if || sk->sk_bound_dev_if == dif))
				goto hit;
		}
	}
	read_unlock(&head->lock);
	return NULL;

hit:
	sock_hold(sk);
	read_unlock(&head->lock);
	return sk;
}

extern struct sock *inet6_lookup_listener(struct inet_hashinfo *hashinfo,
					  const struct in6_addr *daddr,
					  const unsigned short hnum,
					  const int dif);

static inline struct sock *__inet6_lookup(struct inet_hashinfo *hashinfo,
					  const struct in6_addr *saddr,
					  const u16 sport,
					  const struct in6_addr *daddr,
					  const u16 hnum,
					  const int dif)
{
	struct sock *sk = __inet6_lookup_established(hashinfo, saddr, sport,
						     daddr, hnum, dif);
	if (sk)
		return sk;

	return inet6_lookup_listener(hashinfo, daddr, hnum, dif);
}

extern struct sock *inet6_lookup(struct inet_hashinfo *hashinfo,
				 const struct in6_addr *saddr, const u16 sport,
				 const struct in6_addr *daddr, const u16 dport,
				 const int dif);
#endif /* defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) */
#endif /* _INET6_HASHTABLES_H */