/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

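/* tcp_gso_tstamp() - propagate a software timestamp request to the
 * segment that contains sequence number @ts_seq.
 *
 * Walks the segment list produced by GSO; @seq is the sequence number of
 * the first segment and each following segment advances it by @mss.  Only
 * the segment whose [seq, seq + mss) range covers @ts_seq inherits
 * SKBTX_SW_TSTAMP and the timestamp key, so a single timestamp is emitted
 * for the original large packet.
 */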
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
                           unsigned int seq, unsigned int mss)
{
        while (skb) {
                if (before(ts_seq, seq + mss)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
                        skb_shinfo(skb)->tskey = ts_seq;
                        return;
                }

                skb = skb->next;
                seq += mss;
        }
}

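/* IPv4 entry point for TCP segmentation.  Checks that the packet really
 * was marked SKB_GSO_TCPV4 and that a full TCP header is present, then
 * hands off to the protocol-independent tcp_gso_segment().
 */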
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct iphdr *iph = ip_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);

                /* Set up the checksum pseudo header; normally the stack
                 * is expected to have done this already.
                 */

                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        }

        return tcp_gso_segment(skb, features);
}

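/* Protocol-independent GSO: split one large TCP packet into mss-sized
 * segments.  Beyond the actual split done by skb_segment(), each
 * segment's TCP header must be fixed up: sequence numbers, the
 * FIN/PSH/CWR flags, and checksums, which are adjusted incrementally
 * from the original header rather than recomputed from scratch.
 */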
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

        /* Keep the one's complement of the old length around: the
         * checksums below are adjusted incrementally, not recomputed.
         */
        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only the first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

        /* GSO partial and frag_list segmentation only requires splitting
         * the frame into an MSS multiple and possibly a remainder, both
         * cases return a GSO skb. So update the mss now.
         */
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;

        /* Checksum difference between the old and the new TCP length */
        delta = htonl(oldlen + (thlen + mss));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
                tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));

        while (skb->next) {
                /* FIN and PSH may only be set on the last segment */
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        gso_reset_checksum(skb, ~th->check);
                else
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        }

        /* The following permits TCP Small Queues to work well with GSO:
         * the callback to the TCP stack will run when the last frag is
         * freed at TX completion, not right now when gso_skb is freed
         * by the GSO engine.
         */
        if (copy_destructor) {
                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                atomic_add(sum_truesize - gso_skb->truesize,
                           &skb->sk->sk_wmem_alloc);
        }

        /* The last segment may be shorter than mss; redo its checksum
         * adjustment with its real payload length.
         */
        delta = htonl(oldlen + (skb_tail_pointer(skb) -
                                skb_transport_header(skb)) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                (__force u32)delta));
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(skb, ~th->check);
        else
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}

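/* GRO receive for TCP: try to coalesce @skb with a packet of the same
 * flow already held on @head.  Merging is allowed only when the ports
 * match, the new segment starts exactly at the end of the held packet,
 * the TCP options are byte-identical, and no flag the stack must see
 * immediately (SYN/FIN/RST/URG/PSH/CWR) forces a flush.  Returns a
 * non-NULL pointer when the held packet must be flushed up the stack.
 */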
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        struct sk_buff **pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

        for (; (p = *head); head = &p->next) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

                /* Compare source and destination ports in one 32-bit load */
                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }

        goto out_check_final;

found:
        /* Include the IP ID check below from the innermost IP hdr */
        flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        /* TCP options of both packets must match exactly */
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        /* When we receive our second frame we can make a decision on
         * whether we continue this flow as an atomic flow with a fixed
         * ID or if we use an incrementing ID.
         */
        if (NAPI_GRO_CB(p)->flush_id != 1 ||
            NAPI_GRO_CB(p)->count != 1 ||
            !NAPI_GRO_CB(p)->is_atomic)
                flush |= NAPI_GRO_CB(p)->flush_id;
        else
                NAPI_GRO_CB(p)->is_atomic = false;

        mss = skb_shinfo(p)->gso_size;

        /* Flush if the new segment is larger than mss, or if it does
         * not start exactly at the end of the held packet.
         */
        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

        if (flush || skb_gro_receive(head, skb)) {
                mss = 1;
                goto out_check_final;
        }

        p = *head;
        th2 = tcp_hdr(p);
        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        flush = len < mss;
        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = head;

out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}

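/* Undo GRO before handing a merged packet to the stack: mark the skb
 * CHECKSUM_PARTIAL with the checksum start/offset pointing at the TCP
 * header, and turn the GRO segment count into gso_segs so the packet
 * can later be resegmented.
 */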
int tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

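/* IPv4 wrapper around tcp_gro_receive(): validate the TCP checksum,
 * including the IPv4 pseudo header, unless the packet is already
 * doomed to be flushed anyway.
 */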
static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}

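/* Finish GRO for IPv4: seed th->check with the pseudo-header checksum
 * so the CHECKSUM_PARTIAL state set up by tcp_gro_complete() is valid,
 * and mark the packet as TCPv4 GSO (with a fixed IP ID when the flow
 * proved atomic) so it can be resegmented on transmit.
 */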
static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

        if (NAPI_GRO_CB(skb)->is_atomic)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

        return tcp_gro_complete(skb);
}

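/* Offload callbacks registered for IPPROTO_TCP on the IPv4 path */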
static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_segment    =       tcp4_gso_segment,
                .gro_receive    =       tcp4_gro_receive,
                .gro_complete   =       tcp4_gro_complete,
        },
};

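/* Registers the TCP offload callbacks above; called early at boot
 * (via ipv4_offload_init() in net/ipv4/af_inet.c).
 */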
int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}