GNU Linux-libre 4.19.286-gnu1: drivers/net/ethernet/stmicro/stmmac/norm_desc.c
/*******************************************************************************
  This contains the functions to handle the normal descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

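/* Read the completed TX status from des0/des1 of a normal descriptor.
 * Returns tx_dma_own while the DMA still owns the descriptor, tx_not_ls
 * for segments other than the last one, and tx_done or tx_err after
 * folding any error bits into the driver and extra statistics.
 */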
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
                               struct dma_desc *p, void __iomem *ioaddr)
{
        struct net_device_stats *stats = (struct net_device_stats *)data;
        unsigned int tdes0 = le32_to_cpu(p->des0);
        unsigned int tdes1 = le32_to_cpu(p->des1);
        int ret = tx_done;

        /* Get tx owner first */
        if (unlikely(tdes0 & TDES0_OWN))
                return tx_dma_own;

        /* Verify tx error by looking at the last segment. */
        if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
                return tx_not_ls;

        if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
                if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
                        x->tx_underflow++;
                        stats->tx_fifo_errors++;
                }
                if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
                        x->tx_carrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
                        x->tx_losscarrier++;
                        stats->tx_carrier_errors++;
                }
                if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
                             (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
                             (tdes0 & TDES0_LATE_COLLISION))) {
                        unsigned int collisions;

                        collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
                        stats->collisions += collisions;
                }
                ret = tx_err;
        }

        if (tdes0 & TDES0_VLAN_FRAME)
                x->tx_vlan++;

        if (unlikely(tdes0 & TDES0_DEFERRED))
                x->tx_deferred++;

        return ret;
}

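/* Return the buffer-1 size programmed in des1. The buffer-1 size field
 * sits in the same bit range in TDES1 and RDES1, so the RDES1 mask is
 * reused here.
 */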
static int ndesc_get_tx_len(struct dma_desc *p)
{
        return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
}

/* This function checks each incoming frame for errors and updates the
 * driver and extra statistics accordingly.
 * On success it returns good_frame because the GMAC device is expected
 * to compute the checksum in hardware.
 */
static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
                               struct dma_desc *p)
{
        int ret = good_frame;
        unsigned int rdes0 = le32_to_cpu(p->des0);
        struct net_device_stats *stats = (struct net_device_stats *)data;

        if (unlikely(rdes0 & RDES0_OWN))
                return dma_own;

        if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
                stats->rx_length_errors++;
                return discard_frame;
        }

        if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
                if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
                        x->rx_desc++;
                if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
                        x->sa_filter_fail++;
                if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
                        x->overflow_error++;
                if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
                        x->ipc_csum_error++;
                if (unlikely(rdes0 & RDES0_COLLISION)) {
                        x->rx_collision++;
                        stats->collisions++;
                }
                if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
                        x->rx_crc_errors++;
                        stats->rx_crc_errors++;
                }
                ret = discard_frame;
        }
        if (unlikely(rdes0 & RDES0_DRIBBLING))
                x->dribbling_bit++;

        if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
                x->rx_length++;
                ret = discard_frame;
        }
        if (unlikely(rdes0 & RDES0_MII_ERROR)) {
                x->rx_mii++;
                ret = discard_frame;
        }
#ifdef STMMAC_VLAN_TAG_USED
        if (rdes0 & RDES0_VLAN_TAG)
                x->vlan_tag++;
#endif
        return ret;
}

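/* Initialize an RX descriptor: hand it to the DMA (RDES0_OWN), program
 * the buffer-1 size, hook it into the ring or chain layout, and
 * optionally suppress the receive interrupt for this descriptor.
 */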
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
                               int end, int bfsize)
{
        int bfsize1;

        p->des0 |= cpu_to_le32(RDES0_OWN);

        bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
        p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);

        if (mode == STMMAC_CHAIN_MODE)
                ndesc_rx_set_on_chain(p, end);
        else
                ndesc_rx_set_on_ring(p, end, bfsize);

        if (disable_rx_ic)
                p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
}

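/* Initialize a TX descriptor as owned by the CPU and hook it into the
 * ring or chain layout selected by the current mode.
 */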
static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
        p->des0 &= cpu_to_le32(~TDES0_OWN);
        if (mode == STMMAC_CHAIN_MODE)
                ndesc_tx_set_on_chain(p);
        else
                ndesc_end_tx_desc_on_ring(p, end);
}

static int ndesc_get_tx_owner(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
}

static void ndesc_set_tx_owner(struct dma_desc *p)
{
        p->des0 |= cpu_to_le32(TDES0_OWN);
}

static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
{
        p->des0 |= cpu_to_le32(RDES0_OWN);
}

static int ndesc_get_tx_ls(struct dma_desc *p)
{
        return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
}

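/* Release a TX descriptor after transmission: clear the status/control
 * words while preserving the end-of-ring mark (or chain setup), leaving
 * des2/des3 untouched.
 */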
static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
        int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;

        memset(p, 0, offsetof(struct dma_desc, des2));
        if (mode == STMMAC_CHAIN_MODE)
                ndesc_tx_set_on_chain(p);
        else
                ndesc_end_tx_desc_on_ring(p, ter);
}

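/* Fill in the control bits of a TX descriptor for a new frame: first/last
 * segment flags, checksum insertion, buffer length (per ring or chain
 * mode) and, when requested, ownership transfer to the DMA.
 */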
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                                  bool csum_flag, int mode, bool tx_own,
                                  bool ls, unsigned int tot_pkt_len)
{
        unsigned int tdes1 = le32_to_cpu(p->des1);

        if (is_fs)
                tdes1 |= TDES1_FIRST_SEGMENT;
        else
                tdes1 &= ~TDES1_FIRST_SEGMENT;

        if (likely(csum_flag))
                tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
        else
                tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);

        if (ls)
                tdes1 |= TDES1_LAST_SEGMENT;

        p->des1 = cpu_to_le32(tdes1);

        if (mode == STMMAC_CHAIN_MODE)
                norm_set_tx_desc_len_on_chain(p, len);
        else
                norm_set_tx_desc_len_on_ring(p, len);

        if (tx_own)
                p->des0 |= cpu_to_le32(TDES0_OWN);
}

static void ndesc_set_tx_ic(struct dma_desc *p)
{
        p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
}

static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
        unsigned int csum = 0;

        /* The type-1 checksum offload engines append the checksum at
         * the end of frame and the two bytes of checksum are added in
         * the length.
         * Adjust for that in the framelen for type-1 checksum offload
         * engines
         */
        if (rx_coe_type == STMMAC_RX_COE_TYPE1)
                csum = 2;

        return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
                                >> RDES0_FRAME_LEN_SHIFT) -
                csum);
}

static void ndesc_enable_tx_timestamp(struct dma_desc *p)
{
        p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
}

static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
{
        return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
}

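/* Combine the two timestamp words written back by the hardware into a
 * single nanosecond value: des2 holds nanoseconds, des3 holds seconds.
 */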
static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts)
{
        struct dma_desc *p = (struct dma_desc *)desc;
        u64 ns;

        ns = le32_to_cpu(p->des2);
        /* convert high/sec time stamp value to nanosecond */
        ns += le32_to_cpu(p->des3) * 1000000000ULL;

        *ts = ns;
}

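/* Report whether the received timestamp in des2/des3 is usable; an
 * all-ones pattern in both words marks a corrupted timestamp.
 */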
static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
{
        struct dma_desc *p = (struct dma_desc *)desc;

        if ((le32_to_cpu(p->des2) == 0xffffffff) &&
            (le32_to_cpu(p->des3) == 0xffffffff))
                /* timestamp is corrupted, hence don't store it */
                return 0;
        else
                return 1;
}

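/* Dump the whole RX or TX descriptor ring to the kernel log, one line
 * per descriptor, for debugging.
 */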
static void ndesc_display_ring(void *head, unsigned int size, bool rx)
{
        struct dma_desc *p = (struct dma_desc *)head;
        int i;

        pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");

        for (i = 0; i < size; i++) {
                u64 x;

                x = *(u64 *)p;
                pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
                        i, (unsigned int)virt_to_phys(p),
                        (unsigned int)x, (unsigned int)(x >> 32),
                        p->des2, p->des3);
                p++;
        }
        pr_info("\n");
}

static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
{
        *addr = le32_to_cpu(p->des2);
}

static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
{
        p->des2 = cpu_to_le32(addr);
}

static void ndesc_clear(struct dma_desc *p)
{
        p->des2 = 0;
}

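/* Descriptor callbacks used by the stmmac core when the device works
 * with normal (non-enhanced) descriptors.
 */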
const struct stmmac_desc_ops ndesc_ops = {
        .tx_status = ndesc_get_tx_status,
        .rx_status = ndesc_get_rx_status,
        .get_tx_len = ndesc_get_tx_len,
        .init_rx_desc = ndesc_init_rx_desc,
        .init_tx_desc = ndesc_init_tx_desc,
        .get_tx_owner = ndesc_get_tx_owner,
        .release_tx_desc = ndesc_release_tx_desc,
        .prepare_tx_desc = ndesc_prepare_tx_desc,
        .set_tx_ic = ndesc_set_tx_ic,
        .get_tx_ls = ndesc_get_tx_ls,
        .set_tx_owner = ndesc_set_tx_owner,
        .set_rx_owner = ndesc_set_rx_owner,
        .get_rx_frame_len = ndesc_get_rx_frame_len,
        .enable_tx_timestamp = ndesc_enable_tx_timestamp,
        .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
        .get_timestamp = ndesc_get_timestamp,
        .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
        .display_ring = ndesc_display_ring,
        .get_addr = ndesc_get_addr,
        .set_addr = ndesc_set_addr,
        .clear = ndesc_clear,
};