[INET]: Generalise the tcp_listen_ lock routines
[firefly-linux-kernel-4.4.55.git] net/ipv4/tcp_ipv4.c
index dad98e4a50431906f3b98ec3725cd9204813363a..5f9ad95304cae143eb837b5e7a007aa3f8075460 100644
@@ -36,7 +36,7 @@
  *                                     ACK bit.
  *             Andi Kleen :            Implemented fast path mtu discovery.
  *                                     Fixed many serious bugs in the
- *                                     open_request handling and moved
+ *                                     request_sock handling and moved
  *                                     most of it into the af independent code.
  *                                     Added tail drop and some other bugfixes.
 *                                     Added new listen semantics.
@@ -64,6 +64,7 @@
 #include <linux/times.h>
 
 #include <net/icmp.h>
+#include <net/inet_hashtables.h>
 #include <net/tcp.h>
 #include <net/ipv6.h>
 #include <net/inet_common.h>
@@ -88,12 +89,12 @@ static struct socket *tcp_socket;
 void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
                       struct sk_buff *skb);
 
-struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
-       .__tcp_lhash_lock       =       RW_LOCK_UNLOCKED,
-       .__tcp_lhash_users      =       ATOMIC_INIT(0),
-       .__tcp_lhash_wait
-         = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.__tcp_lhash_wait),
-       .__tcp_portalloc_lock   =       SPIN_LOCK_UNLOCKED
+struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
+       .lhash_lock     = RW_LOCK_UNLOCKED,
+       .lhash_users    = ATOMIC_INIT(0),
+       .lhash_wait     = __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
+       .portalloc_lock = SPIN_LOCK_UNLOCKED,
+       .port_rover     = 1024 - 1,
 };
 
 /*
@@ -102,84 +103,8 @@ struct tcp_hashinfo __cacheline_aligned tcp_hashinfo = {
  * 32768-61000
  */
 int sysctl_local_port_range[2] = { 1024, 4999 };
-int tcp_port_rover = 1024 - 1;
 
-static __inline__ int tcp_hashfn(__u32 laddr, __u16 lport,
-                                __u32 faddr, __u16 fport)
-{
-       int h = (laddr ^ lport) ^ (faddr ^ fport);
-       h ^= h >> 16;
-       h ^= h >> 8;
-       return h & (tcp_ehash_size - 1);
-}
-
-static __inline__ int tcp_sk_hashfn(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       __u32 laddr = inet->rcv_saddr;
-       __u16 lport = inet->num;
-       __u32 faddr = inet->daddr;
-       __u16 fport = inet->dport;
-
-       return tcp_hashfn(laddr, lport, faddr, fport);
-}
-
-/* Allocate and initialize a new TCP local port bind bucket.
- * The bindhash mutex for snum's hash chain must be held here.
- */
-struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
-                                         unsigned short snum)
-{
-       struct tcp_bind_bucket *tb = kmem_cache_alloc(tcp_bucket_cachep,
-                                                     SLAB_ATOMIC);
-       if (tb) {
-               tb->port = snum;
-               tb->fastreuse = 0;
-               INIT_HLIST_HEAD(&tb->owners);
-               hlist_add_head(&tb->node, &head->chain);
-       }
-       return tb;
-}
-
-/* Caller must hold hashbucket lock for this tb with local BH disabled */
-void tcp_bucket_destroy(struct tcp_bind_bucket *tb)
-{
-       if (hlist_empty(&tb->owners)) {
-               __hlist_del(&tb->node);
-               kmem_cache_free(tcp_bucket_cachep, tb);
-       }
-}
-
-/* Caller must disable local BH processing. */
-static __inline__ void __tcp_inherit_port(struct sock *sk, struct sock *child)
-{
-       struct tcp_bind_hashbucket *head =
-                               &tcp_bhash[tcp_bhashfn(inet_sk(child)->num)];
-       struct tcp_bind_bucket *tb;
-
-       spin_lock(&head->lock);
-       tb = tcp_sk(sk)->bind_hash;
-       sk_add_bind_node(child, &tb->owners);
-       tcp_sk(child)->bind_hash = tb;
-       spin_unlock(&head->lock);
-}
-
-inline void tcp_inherit_port(struct sock *sk, struct sock *child)
-{
-       local_bh_disable();
-       __tcp_inherit_port(sk, child);
-       local_bh_enable();
-}
-
-void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
-                  unsigned short snum)
-{
-       inet_sk(sk)->num = snum;
-       sk_add_bind_node(sk, &tb->owners);
-       tcp_sk(sk)->bind_hash = tb;
-}
-
-static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
+static inline int tcp_bind_conflict(struct sock *sk, struct inet_bind_bucket *tb)
 {
        const u32 sk_rcv_saddr = tcp_v4_rcv_saddr(sk);
        struct sock *sk2;
@@ -209,9 +134,9 @@ static inline int tcp_bind_conflict(struct sock *sk, struct tcp_bind_bucket *tb)
  */
 static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
 {
-       struct tcp_bind_hashbucket *head;
+       struct inet_bind_hashbucket *head;
        struct hlist_node *node;
-       struct tcp_bind_bucket *tb;
+       struct inet_bind_bucket *tb;
        int ret;
 
        local_bh_disable();
@@ -221,30 +146,35 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
                int remaining = (high - low) + 1;
                int rover;
 
-               spin_lock(&tcp_portalloc_lock);
-               if (tcp_port_rover < low)
+               spin_lock(&tcp_hashinfo.portalloc_lock);
+               if (tcp_hashinfo.port_rover < low)
                        rover = low;
                else
-                       rover = tcp_port_rover;
+                       rover = tcp_hashinfo.port_rover;
                do {
                        rover++;
                        if (rover > high)
                                rover = low;
-                       head = &tcp_bhash[tcp_bhashfn(rover)];
+                       head = &tcp_hashinfo.bhash[inet_bhashfn(rover, tcp_hashinfo.bhash_size)];
                        spin_lock(&head->lock);
-                       tb_for_each(tb, node, &head->chain)
+                       inet_bind_bucket_for_each(tb, node, &head->chain)
                                if (tb->port == rover)
                                        goto next;
                        break;
                next:
                        spin_unlock(&head->lock);
                } while (--remaining > 0);
-               tcp_port_rover = rover;
-               spin_unlock(&tcp_portalloc_lock);
-
-               /* Exhausted local port range during search? */
+               tcp_hashinfo.port_rover = rover;
+               spin_unlock(&tcp_hashinfo.portalloc_lock);
+
+               /* Exhausted local port range during search?  It is not
+                * possible for us to be holding one of the bind hash
+                * locks if this test triggers, because if 'remaining'
+                * drops to zero, we broke out of the do/while loop at
+                * the top level, not from the 'break;' statement.
+                */
                ret = 1;
-               if (remaining <= 0)
+               if (unlikely(remaining <= 0))
                        goto fail;
 
                /* OK, here is the one we will use.  HEAD is
@@ -252,9 +182,9 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
                 */
                snum = rover;
        } else {
-               head = &tcp_bhash[tcp_bhashfn(snum)];
+               head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
                spin_lock(&head->lock);
-               tb_for_each(tb, node, &head->chain)
+               inet_bind_bucket_for_each(tb, node, &head->chain)
                        if (tb->port == snum)
                                goto tb_found;
        }
@@ -275,7 +205,7 @@ tb_found:
        }
 tb_not_found:
        ret = 1;
-       if (!tb && (tb = tcp_bucket_create(head, snum)) == NULL)
+       if (!tb && (tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, snum)) == NULL)
                goto fail_unlock;
        if (hlist_empty(&tb->owners)) {
                if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
@@ -286,9 +216,9 @@ tb_not_found:
                   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
                tb->fastreuse = 0;
 success:
-       if (!tcp_sk(sk)->bind_hash)
-               tcp_bind_hash(sk, tb, snum);
-       BUG_TRAP(tcp_sk(sk)->bind_hash == tb);
+       if (!inet_sk(sk)->bind_hash)
+               inet_bind_hash(sk, tb, snum);
+       BUG_TRAP(inet_sk(sk)->bind_hash == tb);
        ret = 0;
 
 fail_unlock:
@@ -298,86 +228,11 @@ fail:
        return ret;
 }
 
-/* Get rid of any references to a local port held by the
- * given sock.
- */
-static void __tcp_put_port(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct tcp_bind_hashbucket *head = &tcp_bhash[tcp_bhashfn(inet->num)];
-       struct tcp_bind_bucket *tb;
-
-       spin_lock(&head->lock);
-       tb = tcp_sk(sk)->bind_hash;
-       __sk_del_bind_node(sk);
-       tcp_sk(sk)->bind_hash = NULL;
-       inet->num = 0;
-       tcp_bucket_destroy(tb);
-       spin_unlock(&head->lock);
-}
-
-void tcp_put_port(struct sock *sk)
-{
-       local_bh_disable();
-       __tcp_put_port(sk);
-       local_bh_enable();
-}
-
-/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it can be very bad on SMP.
- * Look, when several writers sleep and reader wakes them up, all but one
- * immediately hit write lock and grab all the cpus. Exclusive sleep solves
- * this, _but_ remember, it adds useless work on UP machines (wake up each
- * exclusive lock release). It should be ifdefed really.
- */
-
-void tcp_listen_wlock(void)
-{
-       write_lock(&tcp_lhash_lock);
-
-       if (atomic_read(&tcp_lhash_users)) {
-               DEFINE_WAIT(wait);
-
-               for (;;) {
-                       prepare_to_wait_exclusive(&tcp_lhash_wait,
-                                               &wait, TASK_UNINTERRUPTIBLE);
-                       if (!atomic_read(&tcp_lhash_users))
-                               break;
-                       write_unlock_bh(&tcp_lhash_lock);
-                       schedule();
-                       write_lock_bh(&tcp_lhash_lock);
-               }
-
-               finish_wait(&tcp_lhash_wait, &wait);
-       }
-}
-
-static __inline__ void __tcp_v4_hash(struct sock *sk, const int listen_possible)
-{
-       struct hlist_head *list;
-       rwlock_t *lock;
-
-       BUG_TRAP(sk_unhashed(sk));
-       if (listen_possible && sk->sk_state == TCP_LISTEN) {
-               list = &tcp_listening_hash[tcp_sk_listen_hashfn(sk)];
-               lock = &tcp_lhash_lock;
-               tcp_listen_wlock();
-       } else {
-               list = &tcp_ehash[(sk->sk_hashent = tcp_sk_hashfn(sk))].chain;
-               lock = &tcp_ehash[sk->sk_hashent].lock;
-               write_lock(lock);
-       }
-       __sk_add_node(sk, list);
-       sock_prot_inc_use(sk->sk_prot);
-       write_unlock(lock);
-       if (listen_possible && sk->sk_state == TCP_LISTEN)
-               wake_up(&tcp_lhash_wait);
-}
-
 static void tcp_v4_hash(struct sock *sk)
 {
        if (sk->sk_state != TCP_CLOSE) {
                local_bh_disable();
-               __tcp_v4_hash(sk, 1);
+               __inet_hash(&tcp_hashinfo, sk, 1);
                local_bh_enable();
        }
 }
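
For reference, a simplified sketch (not part of the patch) of what the generalised __inet_hash() roughly does, reconstructed from the __tcp_v4_hash() removed above: the hash-table state is passed in via struct inet_hashinfo instead of living in TCP-private globals. Helper and field names below follow the ones visible elsewhere in this diff.

static inline void example_inet_hash(struct inet_hashinfo *hashinfo,
                                     struct sock *sk, const int listen_possible)
{
        struct hlist_head *list;
        rwlock_t *lock;

        if (listen_possible && sk->sk_state == TCP_LISTEN) {
                /* Listening sockets live on listening_hash, serialised by
                 * the per-hashinfo lhash_lock/lhash_users pair. */
                list = &hashinfo->listening_hash[inet_lhashfn(inet_sk(sk)->num)];
                lock = &hashinfo->lhash_lock;
                inet_listen_wlock(hashinfo);
        } else {
                /* Established sockets live on the ehash chain selected by
                 * the 4-tuple hash. */
                const struct inet_sock *inet = inet_sk(sk);

                sk->sk_hashent = inet_ehashfn(inet->rcv_saddr, inet->num,
                                              inet->daddr, inet->dport,
                                              hashinfo->ehash_size);
                list = &hashinfo->ehash[sk->sk_hashent].chain;
                lock = &hashinfo->ehash[sk->sk_hashent].lock;
                write_lock(lock);
        }
        __sk_add_node(sk, list);
        sock_prot_inc_use(sk->sk_prot);
        write_unlock(lock);
        if (listen_possible && sk->sk_state == TCP_LISTEN)
                wake_up(&hashinfo->lhash_wait);
}
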
@@ -391,10 +246,10 @@ void tcp_unhash(struct sock *sk)
 
        if (sk->sk_state == TCP_LISTEN) {
                local_bh_disable();
-               tcp_listen_wlock();
-               lock = &tcp_lhash_lock;
+               inet_listen_wlock(&tcp_hashinfo);
+               lock = &tcp_hashinfo.lhash_lock;
        } else {
-               struct tcp_ehash_bucket *head = &tcp_ehash[sk->sk_hashent];
+               struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[sk->sk_hashent];
                lock = &head->lock;
                write_lock_bh(&head->lock);
        }
@@ -405,7 +260,7 @@ void tcp_unhash(struct sock *sk)
 
  ende:
        if (sk->sk_state == TCP_LISTEN)
-               wake_up(&tcp_lhash_wait);
+               wake_up(&tcp_hashinfo.lhash_wait);
 }
 
 /* Don't inline this cruft.  Here are some nice properties to
@@ -414,8 +269,10 @@ void tcp_unhash(struct sock *sk)
  * connection.  So always assume those are both wildcarded
  * during the search since they can never be otherwise.
  */
-static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
-                                            unsigned short hnum, int dif)
+static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head,
+                                            const u32 daddr,
+                                            const unsigned short hnum,
+                                            const int dif)
 {
        struct sock *result = NULL, *sk;
        struct hlist_node *node;
@@ -451,14 +308,15 @@ static struct sock *__tcp_v4_lookup_listener(struct hlist_head *head, u32 daddr,
 }
 
 /* Optimize the common listener case. */
-static inline struct sock *tcp_v4_lookup_listener(u32 daddr,
-               unsigned short hnum, int dif)
+static inline struct sock *tcp_v4_lookup_listener(const u32 daddr,
+                                                 const unsigned short hnum,
+                                                 const int dif)
 {
        struct sock *sk = NULL;
        struct hlist_head *head;
 
-       read_lock(&tcp_lhash_lock);
-       head = &tcp_listening_hash[tcp_lhashfn(hnum)];
+       read_lock(&tcp_hashinfo.lhash_lock);
+       head = &tcp_hashinfo.listening_hash[inet_lhashfn(hnum)];
        if (!hlist_empty(head)) {
                struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
 
@@ -473,7 +331,7 @@ static inline struct sock *tcp_v4_lookup_listener(u32 daddr,
 sherry_cache:
                sock_hold(sk);
        }
-       read_unlock(&tcp_lhash_lock);
+       read_unlock(&tcp_hashinfo.lhash_lock);
        return sk;
 }
 
@@ -483,11 +341,13 @@ sherry_cache:
  * Local BH must be disabled here.
  */
 
-static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
-                                                      u32 daddr, u16 hnum,
-                                                      int dif)
+static inline struct sock *__tcp_v4_lookup_established(const u32 saddr,
+                                                      const u16 sport,
+                                                      const u32 daddr,
+                                                      const u16 hnum,
+                                                      const int dif)
 {
-       struct tcp_ehash_bucket *head;
+       struct inet_ehash_bucket *head;
        TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
        __u32 ports = TCP_COMBINED_PORTS(sport, hnum);
        struct sock *sk;
@@ -495,8 +355,8 @@ static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
        /* Optimize here for direct hit, only listening connections can
         * have wildcards anyways.
         */
-       int hash = tcp_hashfn(daddr, hnum, saddr, sport);
-       head = &tcp_ehash[hash];
+       const int hash = inet_ehashfn(daddr, hnum, saddr, sport, tcp_hashinfo.ehash_size);
+       head = &tcp_hashinfo.ehash[hash];
        read_lock(&head->lock);
        sk_for_each(sk, node, &head->chain) {
                if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
@@ -504,7 +364,7 @@ static inline struct sock *__tcp_v4_lookup_established(u32 saddr, u16 sport,
        }
 
        /* Must check for a TIME_WAIT'er before going to listener hash. */
-       sk_for_each(sk, node, &(head + tcp_ehash_size)->chain) {
+       sk_for_each(sk, node, &(head + tcp_hashinfo.ehash_size)->chain) {
                if (TCP_IPV4_TW_MATCH(sk, acookie, saddr, daddr, ports, dif))
                        goto hit;
        }
@@ -558,8 +418,8 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
        int dif = sk->sk_bound_dev_if;
        TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
        __u32 ports = TCP_COMBINED_PORTS(inet->dport, lport);
-       int hash = tcp_hashfn(daddr, lport, saddr, inet->dport);
-       struct tcp_ehash_bucket *head = &tcp_ehash[hash];
+       const int hash = inet_ehashfn(daddr, lport, saddr, inet->dport, tcp_hashinfo.ehash_size);
+       struct inet_ehash_bucket *head = &tcp_hashinfo.ehash[hash];
        struct sock *sk2;
        struct hlist_node *node;
        struct tcp_tw_bucket *tw;
@@ -567,7 +427,7 @@ static int __tcp_v4_check_established(struct sock *sk, __u16 lport,
        write_lock(&head->lock);
 
        /* Check TIME-WAIT sockets first. */
-       sk_for_each(sk2, node, &(head + tcp_ehash_size)->chain) {
+       sk_for_each(sk2, node, &(head + tcp_hashinfo.ehash_size)->chain) {
                tw = (struct tcp_tw_bucket *)sk2;
 
                if (TCP_IPV4_TW_MATCH(sk2, acookie, saddr, daddr, ports, dif)) {
@@ -652,9 +512,9 @@ static inline u32 connect_port_offset(const struct sock *sk)
  */
 static inline int tcp_v4_hash_connect(struct sock *sk)
 {
-       unsigned short snum = inet_sk(sk)->num;
-       struct tcp_bind_hashbucket *head;
-       struct tcp_bind_bucket *tb;
+       const unsigned short snum = inet_sk(sk)->num;
+       struct inet_bind_hashbucket *head;
+       struct inet_bind_bucket *tb;
        int ret;
 
        if (!snum) {
@@ -671,14 +531,14 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
                local_bh_disable();
                for (i = 1; i <= range; i++) {
                        port = low + (i + offset) % range;
-                       head = &tcp_bhash[tcp_bhashfn(port)];
+                       head = &tcp_hashinfo.bhash[inet_bhashfn(port, tcp_hashinfo.bhash_size)];
                        spin_lock(&head->lock);
 
                        /* Does not bother with rcv_saddr checks,
                         * because the established check is already
                         * unique enough.
                         */
-                       tb_for_each(tb, node, &head->chain) {
+                       inet_bind_bucket_for_each(tb, node, &head->chain) {
                                if (tb->port == port) {
                                        BUG_TRAP(!hlist_empty(&tb->owners));
                                        if (tb->fastreuse >= 0)
@@ -691,7 +551,7 @@ static inline int tcp_v4_hash_connect(struct sock *sk)
                                }
                        }
 
-                       tb = tcp_bucket_create(head, port);
+                       tb = inet_bind_bucket_create(tcp_hashinfo.bind_bucket_cachep, head, port);
                        if (!tb) {
                                spin_unlock(&head->lock);
                                break;
@@ -710,10 +570,10 @@ ok:
                hint += i;
 
                /* Head lock still held and bh's disabled */
-               tcp_bind_hash(sk, tb, port);
+               inet_bind_hash(sk, tb, port);
                if (sk_unhashed(sk)) {
                        inet_sk(sk)->sport = htons(port);
-                       __tcp_v4_hash(sk, 0);
+                       __inet_hash(&tcp_hashinfo, sk, 0);
                }
                spin_unlock(&head->lock);
 
@@ -726,11 +586,11 @@ ok:
                goto out;
        }
 
-       head  = &tcp_bhash[tcp_bhashfn(snum)];
-       tb  = tcp_sk(sk)->bind_hash;
+       head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
+       tb  = inet_sk(sk)->bind_hash;
        spin_lock_bh(&head->lock);
        if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
-               __tcp_v4_hash(sk, 0);
+               __inet_hash(&tcp_hashinfo, sk, 0);
                spin_unlock_bh(&head->lock);
                return 0;
        } else {
@@ -832,8 +692,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                goto failure;
 
        /* OK, now commit destination to socket.  */
-       __sk_dst_set(sk, &rt->u.dst);
-       tcp_v4_setup_caps(sk, &rt->u.dst);
+       sk_setup_caps(sk, &rt->u.dst);
 
        if (!tp->write_seq)
                tp->write_seq = secure_tcp_sequence_number(inet->saddr,
@@ -869,21 +728,23 @@ static __inline__ u32 tcp_v4_synq_hash(u32 raddr, u16 rport, u32 rnd)
        return (jhash_2words(raddr, (u32) rport, rnd) & (TCP_SYNQ_HSIZE - 1));
 }
 
-static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
-                                             struct open_request ***prevp,
+static struct request_sock *tcp_v4_search_req(struct tcp_sock *tp,
+                                             struct request_sock ***prevp,
                                              __u16 rport,
                                              __u32 raddr, __u32 laddr)
 {
-       struct tcp_listen_opt *lopt = tp->listen_opt;
-       struct open_request *req, **prev;
+       struct listen_sock *lopt = tp->accept_queue.listen_opt;
+       struct request_sock *req, **prev;
 
        for (prev = &lopt->syn_table[tcp_v4_synq_hash(raddr, rport, lopt->hash_rnd)];
             (req = *prev) != NULL;
             prev = &req->dl_next) {
-               if (req->rmt_port == rport &&
-                   req->af.v4_req.rmt_addr == raddr &&
-                   req->af.v4_req.loc_addr == laddr &&
-                   TCP_INET_FAMILY(req->class->family)) {
+               const struct inet_request_sock *ireq = inet_rsk(req);
+
+               if (ireq->rmt_port == rport &&
+                   ireq->rmt_addr == raddr &&
+                   ireq->loc_addr == laddr &&
+                   TCP_INET_FAMILY(req->rsk_ops->family)) {
                        BUG_TRAP(!req->sk);
                        *prevp = prev;
                        break;
@@ -893,21 +754,13 @@ static struct open_request *tcp_v4_search_req(struct tcp_sock *tp,
        return req;
 }
 
-static void tcp_v4_synq_add(struct sock *sk, struct open_request *req)
+static void tcp_v4_synq_add(struct sock *sk, struct request_sock *req)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcp_listen_opt *lopt = tp->listen_opt;
-       u32 h = tcp_v4_synq_hash(req->af.v4_req.rmt_addr, req->rmt_port, lopt->hash_rnd);
-
-       req->expires = jiffies + TCP_TIMEOUT_INIT;
-       req->retrans = 0;
-       req->sk = NULL;
-       req->dl_next = lopt->syn_table[h];
-
-       write_lock(&tp->syn_wait_lock);
-       lopt->syn_table[h] = req;
-       write_unlock(&tp->syn_wait_lock);
+       struct listen_sock *lopt = tp->accept_queue.listen_opt;
+       u32 h = tcp_v4_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
 
+       reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
        tcp_synq_added(sk);
 }
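
reqsk_queue_hash_req() absorbs the open-coded syn-table insertion removed above. A rough sketch of what it is expected to do, using only fields that appear elsewhere in this diff (illustrative only):

static inline void example_reqsk_queue_hash_req(struct request_sock_queue *queue,
                                                u32 hash, struct request_sock *req,
                                                unsigned long timeout)
{
        struct listen_sock *lopt = queue->listen_opt;

        req->expires = jiffies + timeout;       /* SYN-ACK retransmit deadline */
        req->retrans = 0;
        req->sk = NULL;
        req->dl_next = lopt->syn_table[hash];

        write_lock(&queue->syn_wait_lock);      /* serialise vs. /proc walkers */
        lopt->syn_table[hash] = req;
        write_unlock(&queue->syn_wait_lock);
}
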
 
@@ -1050,7 +903,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
        }
 
        switch (sk->sk_state) {
-               struct open_request *req, **prev;
+               struct request_sock *req, **prev;
        case TCP_LISTEN:
                if (sock_owned_by_user(sk))
                        goto out;
@@ -1065,7 +918,7 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
                 */
                BUG_TRAP(!req->sk);
 
-               if (seq != req->snt_isn) {
+               if (seq != tcp_rsk(req)->snt_isn) {
                        NET_INC_STATS_BH(LINUX_MIB_OUTOFWINDOWICMPS);
                        goto out;
                }
@@ -1254,28 +1107,29 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
        tcp_tw_put(tw);
 }
 
-static void tcp_v4_or_send_ack(struct sk_buff *skb, struct open_request *req)
+static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
 {
-       tcp_v4_send_ack(skb, req->snt_isn + 1, req->rcv_isn + 1, req->rcv_wnd,
+       tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
                        req->ts_recent);
 }
 
 static struct dst_entry* tcp_v4_route_req(struct sock *sk,
-                                         struct open_request *req)
+                                         struct request_sock *req)
 {
        struct rtable *rt;
-       struct ip_options *opt = req->af.v4_req.opt;
+       const struct inet_request_sock *ireq = inet_rsk(req);
+       struct ip_options *opt = inet_rsk(req)->opt;
        struct flowi fl = { .oif = sk->sk_bound_dev_if,
                            .nl_u = { .ip4_u =
                                      { .daddr = ((opt && opt->srr) ?
                                                  opt->faddr :
-                                                 req->af.v4_req.rmt_addr),
-                                       .saddr = req->af.v4_req.loc_addr,
+                                                 ireq->rmt_addr),
+                                       .saddr = ireq->loc_addr,
                                        .tos = RT_CONN_FLAGS(sk) } },
                            .proto = IPPROTO_TCP,
                            .uli_u = { .ports =
                                       { .sport = inet_sk(sk)->sport,
-                                        .dport = req->rmt_port } } };
+                                        .dport = ireq->rmt_port } } };
 
        if (ip_route_output_flow(&rt, &fl, sk, 0)) {
                IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
@@ -1291,12 +1145,13 @@ static struct dst_entry* tcp_v4_route_req(struct sock *sk,
 
 /*
  *     Send a SYN-ACK after having received an ACK.
- *     This still operates on a open_request only, not on a big
+ *     This still operates on a request_sock only, not on a big
  *     socket.
  */
-static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
+static int tcp_v4_send_synack(struct sock *sk, struct request_sock *req,
                              struct dst_entry *dst)
 {
+       const struct inet_request_sock *ireq = inet_rsk(req);
        int err = -1;
        struct sk_buff * skb;
 
@@ -1310,14 +1165,14 @@ static int tcp_v4_send_synack(struct sock *sk, struct open_request *req,
                struct tcphdr *th = skb->h.th;
 
                th->check = tcp_v4_check(th, skb->len,
-                                        req->af.v4_req.loc_addr,
-                                        req->af.v4_req.rmt_addr,
+                                        ireq->loc_addr,
+                                        ireq->rmt_addr,
                                         csum_partial((char *)th, skb->len,
                                                      skb->csum));
 
-               err = ip_build_and_send_pkt(skb, sk, req->af.v4_req.loc_addr,
-                                           req->af.v4_req.rmt_addr,
-                                           req->af.v4_req.opt);
+               err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
+                                           ireq->rmt_addr,
+                                           ireq->opt);
                if (err == NET_XMIT_CN)
                        err = 0;
        }
@@ -1328,12 +1183,12 @@ out:
 }
 
 /*
- *     IPv4 open_request destructor.
+ *     IPv4 request_sock destructor.
  */
-static void tcp_v4_or_free(struct open_request *req)
+static void tcp_v4_reqsk_destructor(struct request_sock *req)
 {
-       if (req->af.v4_req.opt)
-               kfree(req->af.v4_req.opt);
+       if (inet_rsk(req)->opt)
+               kfree(inet_rsk(req)->opt);
 }
 
 static inline void syn_flood_warning(struct sk_buff *skb)
@@ -1349,7 +1204,7 @@ static inline void syn_flood_warning(struct sk_buff *skb)
 }
 
 /*
- * Save and compile IPv4 options into the open_request if needed.
+ * Save and compile IPv4 options into the request_sock if needed.
  */
 static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
                                                     struct sk_buff *skb)
@@ -1370,33 +1225,20 @@ static inline struct ip_options *tcp_v4_save_options(struct sock *sk,
        return dopt;
 }
 
-/*
- * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
- * One SYN_RECV socket costs about 80bytes on a 32bit machine.
- * It would be better to replace it with a global counter for all sockets
- * but then some measure against one socket starving all other sockets
- * would be needed.
- *
- * It was 128 by default. Experiments with real servers show, that
- * it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
- * Further increasing requires to change hash table size.
- */
-int sysctl_max_syn_backlog = 256;
-
-struct or_calltable or_ipv4 = {
+struct request_sock_ops tcp_request_sock_ops = {
        .family         =       PF_INET,
+       .obj_size       =       sizeof(struct tcp_request_sock),
        .rtx_syn_ack    =       tcp_v4_send_synack,
-       .send_ack       =       tcp_v4_or_send_ack,
-       .destructor     =       tcp_v4_or_free,
+       .send_ack       =       tcp_v4_reqsk_send_ack,
+       .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
 };
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
+       struct inet_request_sock *ireq;
        struct tcp_options_received tmp_opt;
-       struct open_request *req;
+       struct request_sock *req;
        __u32 saddr = skb->nh.iph->saddr;
        __u32 daddr = skb->nh.iph->daddr;
        __u32 isn = TCP_SKB_CB(skb)->when;
@@ -1433,7 +1275,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
        if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
                goto drop;
 
-       req = tcp_openreq_alloc();
+       req = reqsk_alloc(&tcp_request_sock_ops);
        if (!req)
                goto drop;
 
@@ -1461,10 +1303,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 
        tcp_openreq_init(req, &tmp_opt, skb);
 
-       req->af.v4_req.loc_addr = daddr;
-       req->af.v4_req.rmt_addr = saddr;
-       req->af.v4_req.opt = tcp_v4_save_options(sk, skb);
-       req->class = &or_ipv4;
+       ireq = inet_rsk(req);
+       ireq->loc_addr = daddr;
+       ireq->rmt_addr = saddr;
+       ireq->opt = tcp_v4_save_options(sk, skb);
        if (!want_cookie)
                TCP_ECN_create_request(req, skb->h.th);
 
@@ -1511,32 +1353,31 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                         * to destinations, already remembered
                         * to the moment of synflood.
                         */
-                       NETDEBUG(if (net_ratelimit()) \
-                                       printk(KERN_DEBUG "TCP: drop open "
-                                                         "request from %u.%u."
-                                                         "%u.%u/%u\n", \
-                                              NIPQUAD(saddr),
-                                              ntohs(skb->h.th->source)));
+                       LIMIT_NETDEBUG(printk(KERN_DEBUG "TCP: drop open "
+                                             "request from %u.%u."
+                                             "%u.%u/%u\n",
+                                             NIPQUAD(saddr),
+                                             ntohs(skb->h.th->source)));
                        dst_release(dst);
                        goto drop_and_free;
                }
 
                isn = tcp_v4_init_sequence(sk, skb);
        }
-       req->snt_isn = isn;
+       tcp_rsk(req)->snt_isn = isn;
 
        if (tcp_v4_send_synack(sk, req, dst))
                goto drop_and_free;
 
        if (want_cookie) {
-               tcp_openreq_free(req);
+               reqsk_free(req);
        } else {
                tcp_v4_synq_add(sk, req);
        }
        return 0;
 
 drop_and_free:
-       tcp_openreq_free(req);
+       reqsk_free(req);
 drop:
        TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
        return 0;
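
For readers following the inet_rsk()/tcp_rsk() accessors used above: they rely on nested struct embedding. A sketch of the assumed layering (field lists abridged; the example_* casts are illustrative, not copied from the patch):

/*
 *      struct request_sock      { expires, retrans, sk, dl_next, rsk_ops, ... };
 *      struct inet_request_sock { struct request_sock req; loc_addr, rmt_addr, rmt_port, opt, ... };
 *      struct tcp_request_sock  { struct inet_request_sock req; snt_isn, rcv_isn, ... };
 *
 * so the accessors reduce to container casts:
 */
static inline struct inet_request_sock *example_inet_rsk(struct request_sock *req)
{
        return (struct inet_request_sock *)req;
}

static inline struct tcp_request_sock *example_tcp_rsk(struct request_sock *req)
{
        return (struct tcp_request_sock *)req;
}
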
@@ -1548,9 +1389,10 @@ drop:
  * now create the new socket.
  */
 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
-                                 struct open_request *req,
+                                 struct request_sock *req,
                                  struct dst_entry *dst)
 {
+       struct inet_request_sock *ireq;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
@@ -1565,16 +1407,16 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        if (!newsk)
                goto exit;
 
-       newsk->sk_dst_cache = dst;
-       tcp_v4_setup_caps(newsk, dst);
+       sk_setup_caps(newsk, dst);
 
        newtp                 = tcp_sk(newsk);
        newinet               = inet_sk(newsk);
-       newinet->daddr        = req->af.v4_req.rmt_addr;
-       newinet->rcv_saddr    = req->af.v4_req.loc_addr;
-       newinet->saddr        = req->af.v4_req.loc_addr;
-       newinet->opt          = req->af.v4_req.opt;
-       req->af.v4_req.opt    = NULL;
+       ireq                  = inet_rsk(req);
+       newinet->daddr        = ireq->rmt_addr;
+       newinet->rcv_saddr    = ireq->loc_addr;
+       newinet->saddr        = ireq->loc_addr;
+       newinet->opt          = ireq->opt;
+       ireq->opt             = NULL;
        newinet->mc_index     = tcp_v4_iif(skb);
        newinet->mc_ttl       = skb->nh.iph->ttl;
        newtp->ext_header_len = 0;
@@ -1586,8 +1428,8 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
        tcp_initialize_rcv_mss(newsk);
 
-       __tcp_v4_hash(newsk, 0);
-       __tcp_inherit_port(sk, newsk);
+       __inet_hash(&tcp_hashinfo, newsk, 0);
+       __inet_inherit_port(&tcp_hashinfo, sk, newsk);
 
        return newsk;
 
@@ -1605,9 +1447,9 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
        struct iphdr *iph = skb->nh.iph;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sock *nsk;
-       struct open_request **prev;
+       struct request_sock **prev;
        /* Find possible connection requests. */
-       struct open_request *req = tcp_v4_search_req(tp, &prev, th->source,
+       struct request_sock *req = tcp_v4_search_req(tp, &prev, th->source,
                                                     iph->saddr, iph->daddr);
        if (req)
                return tcp_check_req(sk, skb, req, prev);
@@ -1642,8 +1484,7 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
                                  skb->nh.iph->daddr, skb->csum))
                        return 0;
 
-               NETDEBUG(if (net_ratelimit())
-                               printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
+               LIMIT_NETDEBUG(printk(KERN_DEBUG "hw tcp v4 csum failed\n"));
                skb->ip_summed = CHECKSUM_NONE;
        }
        if (skb->len <= 76) {
@@ -1846,112 +1687,6 @@ do_time_wait:
        goto discard_it;
 }
 
-/* With per-bucket locks this operation is not-atomic, so that
- * this version is not worse.
- */
-static void __tcp_v4_rehash(struct sock *sk)
-{
-       sk->sk_prot->unhash(sk);
-       sk->sk_prot->hash(sk);
-}
-
-static int tcp_v4_reselect_saddr(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       int err;
-       struct rtable *rt;
-       __u32 old_saddr = inet->saddr;
-       __u32 new_saddr;
-       __u32 daddr = inet->daddr;
-
-       if (inet->opt && inet->opt->srr)
-               daddr = inet->opt->faddr;
-
-       /* Query new route. */
-       err = ip_route_connect(&rt, daddr, 0,
-                              RT_CONN_FLAGS(sk),
-                              sk->sk_bound_dev_if,
-                              IPPROTO_TCP,
-                              inet->sport, inet->dport, sk);
-       if (err)
-               return err;
-
-       __sk_dst_set(sk, &rt->u.dst);
-       tcp_v4_setup_caps(sk, &rt->u.dst);
-
-       new_saddr = rt->rt_src;
-
-       if (new_saddr == old_saddr)
-               return 0;
-
-       if (sysctl_ip_dynaddr > 1) {
-               printk(KERN_INFO "tcp_v4_rebuild_header(): shifting inet->"
-                                "saddr from %d.%d.%d.%d to %d.%d.%d.%d\n",
-                      NIPQUAD(old_saddr),
-                      NIPQUAD(new_saddr));
-       }
-
-       inet->saddr = new_saddr;
-       inet->rcv_saddr = new_saddr;
-
-       /* XXX The only one ugly spot where we need to
-        * XXX really change the sockets identity after
-        * XXX it has entered the hashes. -DaveM
-        *
-        * Besides that, it does not check for connection
-        * uniqueness. Wait for troubles.
-        */
-       __tcp_v4_rehash(sk);
-       return 0;
-}
-
-int tcp_v4_rebuild_header(struct sock *sk)
-{
-       struct inet_sock *inet = inet_sk(sk);
-       struct rtable *rt = (struct rtable *)__sk_dst_check(sk, 0);
-       u32 daddr;
-       int err;
-
-       /* Route is OK, nothing to do. */
-       if (rt)
-               return 0;
-
-       /* Reroute. */
-       daddr = inet->daddr;
-       if (inet->opt && inet->opt->srr)
-               daddr = inet->opt->faddr;
-
-       {
-               struct flowi fl = { .oif = sk->sk_bound_dev_if,
-                                   .nl_u = { .ip4_u =
-                                             { .daddr = daddr,
-                                               .saddr = inet->saddr,
-                                               .tos = RT_CONN_FLAGS(sk) } },
-                                   .proto = IPPROTO_TCP,
-                                   .uli_u = { .ports =
-                                              { .sport = inet->sport,
-                                                .dport = inet->dport } } };
-                                               
-               err = ip_route_output_flow(&rt, &fl, sk, 0);
-       }
-       if (!err) {
-               __sk_dst_set(sk, &rt->u.dst);
-               tcp_v4_setup_caps(sk, &rt->u.dst);
-               return 0;
-       }
-
-       /* Routing failed... */
-       sk->sk_route_caps = 0;
-
-       if (!sysctl_ip_dynaddr ||
-           sk->sk_state != TCP_SYN_SENT ||
-           (sk->sk_userlocks & SOCK_BINDADDR_LOCK) ||
-           (err = tcp_v4_reselect_saddr(sk)) != 0)
-               sk->sk_err_soft = -err;
-
-       return err;
-}
-
 static void v4_addr2sockaddr(struct sock *sk, struct sockaddr * uaddr)
 {
        struct sockaddr_in *sin = (struct sockaddr_in *) uaddr;
@@ -2023,7 +1758,7 @@ int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw)
 struct tcp_func ipv4_specific = {
        .queue_xmit     =       ip_queue_xmit,
        .send_check     =       tcp_v4_send_check,
-       .rebuild_header =       tcp_v4_rebuild_header,
+       .rebuild_header =       inet_sk_rebuild_header,
        .conn_request   =       tcp_v4_conn_request,
        .syn_recv_sock  =       tcp_v4_syn_recv_sock,
        .remember_stamp =       tcp_v4_remember_stamp,
@@ -2060,9 +1795,10 @@ static int tcp_v4_init_sock(struct sock *sk)
         */
        tp->snd_ssthresh = 0x7fffffff;  /* Infinity */
        tp->snd_cwnd_clamp = ~0;
-       tp->mss_cache_std = tp->mss_cache = 536;
+       tp->mss_cache = 536;
 
        tp->reordering = sysctl_tcp_reordering;
+       tp->ca_ops = &tcp_init_congestion_ops;
 
        sk->sk_state = TCP_CLOSE;
 
@@ -2085,6 +1821,8 @@ int tcp_v4_destroy_sock(struct sock *sk)
 
        tcp_clear_xmit_timers(sk);
 
+       tcp_cleanup_congestion_control(tp);
+
        /* Cleanup up the write buffer. */
        sk_stream_writequeue_purge(sk);
 
@@ -2095,8 +1833,8 @@ int tcp_v4_destroy_sock(struct sock *sk)
        __skb_queue_purge(&tp->ucopy.prequeue);
 
        /* Clean up a referenced TCP bind bucket. */
-       if (tp->bind_hash)
-               tcp_put_port(sk);
+       if (inet_sk(sk)->bind_hash)
+               inet_put_port(&tcp_hashinfo, sk);
 
        /*
         * If sendmsg cached page exists, toss it.
@@ -2137,20 +1875,20 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
 
        if (!sk) {
                st->bucket = 0;
-               sk = sk_head(&tcp_listening_hash[0]);
+               sk = sk_head(&tcp_hashinfo.listening_hash[0]);
                goto get_sk;
        }
 
        ++st->num;
 
        if (st->state == TCP_SEQ_STATE_OPENREQ) {
-               struct open_request *req = cur;
+               struct request_sock *req = cur;
 
                tp = tcp_sk(st->syn_wait_sk);
                req = req->dl_next;
                while (1) {
                        while (req) {
-                               if (req->class->family == st->family) {
+                               if (req->rsk_ops->family == st->family) {
                                        cur = req;
                                        goto out;
                                }
@@ -2159,17 +1897,17 @@ static void *listening_get_next(struct seq_file *seq, void *cur)
                        if (++st->sbucket >= TCP_SYNQ_HSIZE)
                                break;
 get_req:
-                       req = tp->listen_opt->syn_table[st->sbucket];
+                       req = tp->accept_queue.listen_opt->syn_table[st->sbucket];
                }
                sk        = sk_next(st->syn_wait_sk);
                st->state = TCP_SEQ_STATE_LISTENING;
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
        } else {
                tp = tcp_sk(sk);
-               read_lock_bh(&tp->syn_wait_lock);
-               if (tp->listen_opt && tp->listen_opt->qlen)
+               read_lock_bh(&tp->accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&tp->accept_queue))
                        goto start_req;
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
                sk = sk_next(sk);
        }
 get_sk:
@@ -2179,8 +1917,8 @@ get_sk:
                        goto out;
                }
                tp = tcp_sk(sk);
-               read_lock_bh(&tp->syn_wait_lock);
-               if (tp->listen_opt && tp->listen_opt->qlen) {
+               read_lock_bh(&tp->accept_queue.syn_wait_lock);
+               if (reqsk_queue_len(&tp->accept_queue)) {
 start_req:
                        st->uid         = sock_i_uid(sk);
                        st->syn_wait_sk = sk;
@@ -2188,10 +1926,10 @@ start_req:
                        st->sbucket     = 0;
                        goto get_req;
                }
-               read_unlock_bh(&tp->syn_wait_lock);
+               read_unlock_bh(&tp->accept_queue.syn_wait_lock);
        }
-       if (++st->bucket < TCP_LHTABLE_SIZE) {
-               sk = sk_head(&tcp_listening_hash[st->bucket]);
+       if (++st->bucket < INET_LHTABLE_SIZE) {
+               sk = sk_head(&tcp_hashinfo.listening_hash[st->bucket]);
                goto get_sk;
        }
        cur = NULL;
@@ -2215,7 +1953,7 @@ static void *established_get_first(struct seq_file *seq)
        struct tcp_iter_state* st = seq->private;
        void *rc = NULL;
 
-       for (st->bucket = 0; st->bucket < tcp_ehash_size; ++st->bucket) {
+       for (st->bucket = 0; st->bucket < tcp_hashinfo.ehash_size; ++st->bucket) {
                struct sock *sk;
                struct hlist_node *node;
                struct tcp_tw_bucket *tw;
@@ -2223,8 +1961,8 @@ static void *established_get_first(struct seq_file *seq)
                /* We can reschedule _before_ having picked the target: */
                cond_resched_softirq();
 
-               read_lock(&tcp_ehash[st->bucket].lock);
-               sk_for_each(sk, node, &tcp_ehash[st->bucket].chain) {
+               read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+               sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != st->family) {
                                continue;
                        }
@@ -2233,14 +1971,14 @@ static void *established_get_first(struct seq_file *seq)
                }
                st->state = TCP_SEQ_STATE_TIME_WAIT;
                tw_for_each(tw, node,
-                           &tcp_ehash[st->bucket + tcp_ehash_size].chain) {
+                           &tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain) {
                        if (tw->tw_family != st->family) {
                                continue;
                        }
                        rc = tw;
                        goto out;
                }
-               read_unlock(&tcp_ehash[st->bucket].lock);
+               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
        }
 out:
@@ -2267,15 +2005,15 @@ get_tw:
                        cur = tw;
                        goto out;
                }
-               read_unlock(&tcp_ehash[st->bucket].lock);
+               read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                st->state = TCP_SEQ_STATE_ESTABLISHED;
 
                /* We can reschedule between buckets: */
                cond_resched_softirq();
 
-               if (++st->bucket < tcp_ehash_size) {
-                       read_lock(&tcp_ehash[st->bucket].lock);
-                       sk = sk_head(&tcp_ehash[st->bucket].chain);
+               if (++st->bucket < tcp_hashinfo.ehash_size) {
+                       read_lock(&tcp_hashinfo.ehash[st->bucket].lock);
+                       sk = sk_head(&tcp_hashinfo.ehash[st->bucket].chain);
                } else {
                        cur = NULL;
                        goto out;
@@ -2289,7 +2027,7 @@ get_tw:
        }
 
        st->state = TCP_SEQ_STATE_TIME_WAIT;
-       tw = tw_head(&tcp_ehash[st->bucket + tcp_ehash_size].chain);
+       tw = tw_head(&tcp_hashinfo.ehash[st->bucket + tcp_hashinfo.ehash_size].chain);
        goto get_tw;
 found:
        cur = sk;
@@ -2313,12 +2051,12 @@ static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
        void *rc;
        struct tcp_iter_state* st = seq->private;
 
-       tcp_listen_lock();
+       inet_listen_lock(&tcp_hashinfo);
        st->state = TCP_SEQ_STATE_LISTENING;
        rc        = listening_get_idx(seq, &pos);
 
        if (!rc) {
-               tcp_listen_unlock();
+               inet_listen_unlock(&tcp_hashinfo);
                local_bh_disable();
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc        = established_get_idx(seq, pos);
@@ -2351,7 +2089,7 @@ static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
-                       tcp_listen_unlock();
+                       inet_listen_unlock(&tcp_hashinfo);
                        local_bh_disable();
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        rc        = established_get_first(seq);
@@ -2375,16 +2113,16 @@ static void tcp_seq_stop(struct seq_file *seq, void *v)
        case TCP_SEQ_STATE_OPENREQ:
                if (v) {
                        struct tcp_sock *tp = tcp_sk(st->syn_wait_sk);
-                       read_unlock_bh(&tp->syn_wait_lock);
+                       read_unlock_bh(&tp->accept_queue.syn_wait_lock);
                }
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
-                       tcp_listen_unlock();
+                       inet_listen_unlock(&tcp_hashinfo);
                break;
        case TCP_SEQ_STATE_TIME_WAIT:
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
-                       read_unlock(&tcp_ehash[st->bucket].lock);
+                       read_unlock(&tcp_hashinfo.ehash[st->bucket].lock);
                local_bh_enable();
                break;
        }
@@ -2451,18 +2189,19 @@ void tcp_proc_unregister(struct tcp_seq_afinfo *afinfo)
        memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); 
 }
 
-static void get_openreq4(struct sock *sk, struct open_request *req,
+static void get_openreq4(struct sock *sk, struct request_sock *req,
                         char *tmpbuf, int i, int uid)
 {
+       const struct inet_request_sock *ireq = inet_rsk(req);
        int ttd = req->expires - jiffies;
 
        sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
                i,
-               req->af.v4_req.loc_addr,
+               ireq->loc_addr,
                ntohs(inet_sk(sk)->sport),
-               req->af.v4_req.rmt_addr,
-               ntohs(req->rmt_port),
+               ireq->rmt_addr,
+               ntohs(ireq->rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
@@ -2618,6 +2357,7 @@ struct proto tcp_prot = {
        .sysctl_rmem            = sysctl_tcp_rmem,
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp_sock),
+       .rsk_prot               = &tcp_request_sock_ops,
 };
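
With .rsk_prot wired into tcp_prot, generic code can allocate and drive request socks through the ops table rather than TCP-private helpers. A rough sketch of the assumed reqsk_alloc() shape (the 'slab' member and exact signature are assumptions, not taken from this patch):

static struct request_sock *example_reqsk_alloc(struct request_sock_ops *ops)
{
        /* one object per pending connection, sized per the protocol's
         * .obj_size (sizeof(struct tcp_request_sock) for TCP above);
         * 'slab' is an assumed per-ops kmem cache */
        struct request_sock *req = kmem_cache_alloc(ops->slab, SLAB_ATOMIC);

        if (req != NULL)
                req->rsk_ops = ops;     /* later driven as req->rsk_ops->send_ack() etc. */
        return req;
}
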
 
 
@@ -2638,19 +2378,13 @@ void __init tcp_v4_init(struct net_proto_family *ops)
 }
 
 EXPORT_SYMBOL(ipv4_specific);
-EXPORT_SYMBOL(tcp_bind_hash);
-EXPORT_SYMBOL(tcp_bucket_create);
+EXPORT_SYMBOL(inet_bind_bucket_create);
 EXPORT_SYMBOL(tcp_hashinfo);
-EXPORT_SYMBOL(tcp_inherit_port);
-EXPORT_SYMBOL(tcp_listen_wlock);
-EXPORT_SYMBOL(tcp_port_rover);
 EXPORT_SYMBOL(tcp_prot);
-EXPORT_SYMBOL(tcp_put_port);
 EXPORT_SYMBOL(tcp_unhash);
 EXPORT_SYMBOL(tcp_v4_conn_request);
 EXPORT_SYMBOL(tcp_v4_connect);
 EXPORT_SYMBOL(tcp_v4_do_rcv);
-EXPORT_SYMBOL(tcp_v4_rebuild_header);
 EXPORT_SYMBOL(tcp_v4_remember_stamp);
 EXPORT_SYMBOL(tcp_v4_send_check);
 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
@@ -2660,7 +2394,6 @@ EXPORT_SYMBOL(tcp_proc_register);
 EXPORT_SYMBOL(tcp_proc_unregister);
 #endif
 EXPORT_SYMBOL(sysctl_local_port_range);
-EXPORT_SYMBOL(sysctl_max_syn_backlog);
 EXPORT_SYMBOL(sysctl_tcp_low_latency);
 EXPORT_SYMBOL(sysctl_tcp_tw_reuse);
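
The commit title's point: the listen-hash lock helpers now take a struct inet_hashinfo argument, so non-TCP users can share them. A minimal usage sketch for the reader side, assuming only the helpers and fields visible in this diff (example_walk_listeners is illustrative, not kernel code). Readers take inet_listen_lock(), which bumps lhash_users and releases lhash_lock; writers take inet_listen_wlock(), which holds lhash_lock and waits for lhash_users to drop to zero, exactly as the removed tcp_listen_wlock() did.

static void example_walk_listeners(struct inet_hashinfo *hashinfo)
{
        int i;

        inet_listen_lock(hashinfo);             /* shared access for walkers */
        for (i = 0; i < INET_LHTABLE_SIZE; i++) {
                struct sock *sk;
                struct hlist_node *node;

                sk_for_each(sk, node, &hashinfo->listening_hash[i])
                        ;       /* inspect sk */
        }
        inet_listen_unlock(hashinfo);           /* last reader wakes lhash_wait */
}
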