/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		See also:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allows both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
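
/*
 * Editor's note: both helpers hash the connection 4-tuple with a secret
 * key (the RFC 6528 approach), so initial sequence numbers and timestamp
 * offsets are unpredictable to off-path attackers yet stable for a given
 * 4-tuple.
 */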
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache is
	   held not per host, but per port pair, and the TW bucket is used as
	   state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
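
/*
 * Editor's note: the reuse path above is gated by the per-netns
 * net.ipv4.tcp_tw_reuse sysctl, e.g.:
 *
 *   sysctl -w net.ipv4.tcp_tw_reuse=1
 *
 * Bumping write_seq by 65535 + 2 keeps the new connection's sequence
 * space clear of the old one's even if PAWS is unavailable.
 */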
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = prandom_u32();

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
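
/*
 * Editor's example (not part of this file): a minimal user-space sketch of
 * the blocking connect(2) path that lands in tcp_v4_connect() above. The
 * peer address and port are illustrative.
 */
#if 0
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in sin;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;	/* anything else yields -EAFNOSUPPORT */
	sin.sin_port = htons(80);
	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);

	if (connect(fd, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("connect");	/* e.g. ENETUNREACH from the route lookup */
	close(fd);
	return 0;
}
#endif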
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by the user
 * at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
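
/*
 * Editor's example (not part of this file): user space opts in or out of
 * the PMTU handling above via the IP_MTU_DISCOVER socket option (a sketch;
 * "fd" is an assumed connected TCP socket, error handling omitted).
 */
#if 0
	int val = IP_PMTUDISC_DO;	/* set DF; rely on ICMP_FRAG_NEEDED feedback */

	setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
#endif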
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			WRITE_ONCE(tp->mtu_info, info);
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		skb = tcp_write_queue_head(sk);
		if (WARN_ON_ONCE(!skb))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
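
/*
 * Editor's example (not part of this file): the classic 16-bit one's
 * complement sum that tcp_v4_check() folds the pseudo-header and payload
 * into. A standalone sketch (csum_fold_sketch is a hypothetical name, not
 * a kernel helper), assuming an even number of bytes.
 */
#if 0
static unsigned short csum_fold_sketch(const unsigned short *buf, int words)
{
	unsigned long sum = 0;

	while (words--)
		sum += *buf++;
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (unsigned short)~sum;		/* one's complement of the sum */
}
#endif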
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *		      for the reset.
 *	Answer: if a packet caused the RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's TCP.
 *		So we build the reply based only on parameters that
 *		arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and the incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose any security this way:
		 * the incoming packet is checked against the md5 hash of the
		 * found key, and no RST is generated if the hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;

		BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
			     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
	}

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside of the socket context, is certainly ugly. What can I do?
 */
static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
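	/*
	 * Editor's example: with req->rsk_rcv_wnd = 262144 and rcv_wscale = 7,
	 * the SEG.WND advertised below is 262144 >> 7 = 2048.
	 */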
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}
/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}
#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */
/* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one.
		 * Note that the key might be used concurrently.
		 */
		memcpy(key->key, newkey, newkeylen);

		/* Pairs with READ_ONCE() in tcp_md5_hash_key().
		 * Also note that a reader could catch the new key->keylen value
		 * but the old key->key[]; this is the reason we use __GFP_ZERO
		 * at sock_kmalloc() time below these lines.
		 */
		WRITE_ONCE(key->keylen, newkeylen);

		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}
static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
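
/*
 * Editor's example (not part of this file): the user-space side of the
 * option parsed above, assuming <netinet/tcp.h>, <arpa/inet.h> and
 * <string.h> are included; "fd", the peer address and the key are
 * illustrative.
 */
#if 0
	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	sin->sin_family = AF_INET;
	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
	memcpy(md5.tcpm_key, "secret", 6);
	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
#endif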
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);

		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
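
/*
 * Editor's note: per RFC 2385 the digest covers the IPv4 pseudo-header,
 * the fixed 20-byte TCP header with its checksum zeroed (options are
 * excluded; see tcp_v4_md5_hash_headers() above, which copies only
 * sizeof(*th)), the segment payload, and finally the key itself.
 */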
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
				     : "");
		return true;
	}
	return false;
#endif
	return false;
}
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast addresses */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);
/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr   = ireq->ir_loc_addr;
	inet_opt	      = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = prandom_u32();

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb));
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);
int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only the socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets' backlogs are likely to be non-empty concurrently.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
	 */
	skb_condense(skb);

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);
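
/*
 * Editor's note: the backlog budget is therefore sk_rcvbuf + sk_sndbuf
 * plus the 64 KB headroom; e.g. with a 256 KB receive buffer and a 256 KB
 * send buffer, roughly 576 KB of skb truesize can be queued while the
 * socket is owned by the user.
 */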
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);
static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}
static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
	 * barrier() makes sure the compiler won't play fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked	 = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
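
/*
 * Editor's note: end_seq counts SYN and FIN as one sequence number each.
 * Example: a SYN with no payload at seq = 1000 gives end_seq = 1001; a
 * pure data segment of 100 bytes at seq = 1000 gives end_seq = 1100.
 */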
int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false);
		}
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		ret = tcp_v4_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* Fall through to ACK */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}
static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 *       sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If the socket is aborted during the connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get the next listener socket following cur. If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero, the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct hlist_nulls_node *node;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_nulls_head(&ilb->nulls_head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);
get_sk:
	sk_nulls_for_each_from(sk, node) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
/*
 * Get the first established socket starting from the bucket given in
 * st->bucket. If st->bucket is zero, the very first socket in the hash
 * is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int bucket = st->bucket;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc && bucket == st->bucket)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fall through */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc && bucket == st->bucket)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start		= tcp_seq_start;
	afinfo->seq_ops.next		= tcp_seq_next;
	afinfo->seq_ops.stop		= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
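
/*
 * Editor's note: the addresses above are raw %08X dumps of big-endian
 * values, so on a little-endian host a /proc/net/tcp entry such as
 * "0100007F:0050" decodes to 127.0.0.1:80 (the address bytes read
 * back-to-front; the port 0x0050 == 80).
 */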
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}
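
/*
 * Editor's note: every default above is a per-namespace knob exported
 * under /proc/sys/net/ipv4/, e.g.:
 *
 *   sysctl -w net.ipv4.tcp_syncookies=1
 *   sysctl -w net.ipv4.tcp_keepalive_time=7200
 */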
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
       .init	   = tcp_sk_init,
       .exit	   = tcp_sk_exit,
       .exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}