GNU Linux-libre 4.19.264-gnu1
[releases.git] / drivers / net / ethernet / intel / i40evf / i40e_adminq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
3
4 #include "i40e_status.h"
5 #include "i40e_type.h"
6 #include "i40e_register.h"
7 #include "i40e_adminq.h"
8 #include "i40e_prototype.h"
9
10 /**
11  * i40e_is_nvm_update_op - return true if this is an NVM update operation
12  * @desc: API request descriptor
13  **/
14 static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
15 {
16         return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
17                (desc->opcode == i40e_aqc_opc_nvm_update);
18 }
19
20 /**
21  *  i40e_adminq_init_regs - Initialize AdminQ registers
22  *  @hw: pointer to the hardware structure
23  *
24  *  This assumes the alloc_asq and alloc_arq functions have already been called
25  **/
26 static void i40e_adminq_init_regs(struct i40e_hw *hw)
27 {
28         /* set head and tail registers in our local struct */
29         if (i40e_is_vf(hw)) {
30                 hw->aq.asq.tail = I40E_VF_ATQT1;
31                 hw->aq.asq.head = I40E_VF_ATQH1;
32                 hw->aq.asq.len  = I40E_VF_ATQLEN1;
33                 hw->aq.asq.bal  = I40E_VF_ATQBAL1;
34                 hw->aq.asq.bah  = I40E_VF_ATQBAH1;
35                 hw->aq.arq.tail = I40E_VF_ARQT1;
36                 hw->aq.arq.head = I40E_VF_ARQH1;
37                 hw->aq.arq.len  = I40E_VF_ARQLEN1;
38                 hw->aq.arq.bal  = I40E_VF_ARQBAL1;
39                 hw->aq.arq.bah  = I40E_VF_ARQBAH1;
40         }
41 }
42
43 /**
44  *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
45  *  @hw: pointer to the hardware structure
46  **/
47 static i40e_status i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
48 {
49         i40e_status ret_code;
50
51         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
52                                          i40e_mem_atq_ring,
53                                          (hw->aq.num_asq_entries *
54                                          sizeof(struct i40e_aq_desc)),
55                                          I40E_ADMINQ_DESC_ALIGNMENT);
56         if (ret_code)
57                 return ret_code;
58
59         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
60                                           (hw->aq.num_asq_entries *
61                                           sizeof(struct i40e_asq_cmd_details)));
62         if (ret_code) {
63                 i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
64                 return ret_code;
65         }
66
67         return ret_code;
68 }
69
70 /**
71  *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
72  *  @hw: pointer to the hardware structure
73  **/
74 static i40e_status i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
75 {
76         i40e_status ret_code;
77
78         ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
79                                          i40e_mem_arq_ring,
80                                          (hw->aq.num_arq_entries *
81                                          sizeof(struct i40e_aq_desc)),
82                                          I40E_ADMINQ_DESC_ALIGNMENT);
83
84         return ret_code;
85 }
86
87 /**
88  *  i40e_free_adminq_asq - Free Admin Queue send rings
89  *  @hw: pointer to the hardware structure
90  *
91  *  This assumes the posted send buffers have already been cleaned
92  *  and de-allocated
93  **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
	/* release only the send descriptor ring DMA memory; the command
	 * details array and posted buffers are freed elsewhere
	 */
	i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
98
99 /**
100  *  i40e_free_adminq_arq - Free Admin Queue receive rings
101  *  @hw: pointer to the hardware structure
102  *
103  *  This assumes the posted receive buffers have already been cleaned
104  *  and de-allocated
105  **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
	/* release only the receive descriptor ring DMA memory; posted
	 * receive buffers are freed separately by i40e_free_arq_bufs()
	 */
	i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}
110
111 /**
112  *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
113  *  @hw: pointer to the hardware structure
114  **/
115 static i40e_status i40e_alloc_arq_bufs(struct i40e_hw *hw)
116 {
117         i40e_status ret_code;
118         struct i40e_aq_desc *desc;
119         struct i40e_dma_mem *bi;
120         int i;
121
122         /* We'll be allocating the buffer info memory first, then we can
123          * allocate the mapped buffers for the event processing
124          */
125
126         /* buffer_info structures do not need alignment */
127         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
128                 (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
129         if (ret_code)
130                 goto alloc_arq_bufs;
131         hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;
132
133         /* allocate the mapped buffers */
134         for (i = 0; i < hw->aq.num_arq_entries; i++) {
135                 bi = &hw->aq.arq.r.arq_bi[i];
136                 ret_code = i40e_allocate_dma_mem(hw, bi,
137                                                  i40e_mem_arq_buf,
138                                                  hw->aq.arq_buf_size,
139                                                  I40E_ADMINQ_DESC_ALIGNMENT);
140                 if (ret_code)
141                         goto unwind_alloc_arq_bufs;
142
143                 /* now configure the descriptors for use */
144                 desc = I40E_ADMINQ_DESC(hw->aq.arq, i);
145
146                 desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
147                 if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
148                         desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
149                 desc->opcode = 0;
150                 /* This is in accordance with Admin queue design, there is no
151                  * register for buffer size configuration
152                  */
153                 desc->datalen = cpu_to_le16((u16)bi->size);
154                 desc->retval = 0;
155                 desc->cookie_high = 0;
156                 desc->cookie_low = 0;
157                 desc->params.external.addr_high =
158                         cpu_to_le32(upper_32_bits(bi->pa));
159                 desc->params.external.addr_low =
160                         cpu_to_le32(lower_32_bits(bi->pa));
161                 desc->params.external.param0 = 0;
162                 desc->params.external.param1 = 0;
163         }
164
165 alloc_arq_bufs:
166         return ret_code;
167
168 unwind_alloc_arq_bufs:
169         /* don't try to free the one that failed... */
170         i--;
171         for (; i >= 0; i--)
172                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
173         i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
174
175         return ret_code;
176 }
177
178 /**
179  *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
180  *  @hw: pointer to the hardware structure
181  **/
182 static i40e_status i40e_alloc_asq_bufs(struct i40e_hw *hw)
183 {
184         i40e_status ret_code;
185         struct i40e_dma_mem *bi;
186         int i;
187
188         /* No mapped memory needed yet, just the buffer info structures */
189         ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
190                 (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
191         if (ret_code)
192                 goto alloc_asq_bufs;
193         hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;
194
195         /* allocate the mapped buffers */
196         for (i = 0; i < hw->aq.num_asq_entries; i++) {
197                 bi = &hw->aq.asq.r.asq_bi[i];
198                 ret_code = i40e_allocate_dma_mem(hw, bi,
199                                                  i40e_mem_asq_buf,
200                                                  hw->aq.asq_buf_size,
201                                                  I40E_ADMINQ_DESC_ALIGNMENT);
202                 if (ret_code)
203                         goto unwind_alloc_asq_bufs;
204         }
205 alloc_asq_bufs:
206         return ret_code;
207
208 unwind_alloc_asq_bufs:
209         /* don't try to free the one that failed... */
210         i--;
211         for (; i >= 0; i--)
212                 i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
213         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
214
215         return ret_code;
216 }
217
218 /**
219  *  i40e_free_arq_bufs - Free receive queue buffer info elements
220  *  @hw: pointer to the hardware structure
221  **/
222 static void i40e_free_arq_bufs(struct i40e_hw *hw)
223 {
224         int i;
225
226         /* free descriptors */
227         for (i = 0; i < hw->aq.num_arq_entries; i++)
228                 i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
229
230         /* free the descriptor memory */
231         i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
232
233         /* free the dma header */
234         i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
235 }
236
237 /**
238  *  i40e_free_asq_bufs - Free send queue buffer info elements
239  *  @hw: pointer to the hardware structure
240  **/
241 static void i40e_free_asq_bufs(struct i40e_hw *hw)
242 {
243         int i;
244
245         /* only unmap if the address is non-NULL */
246         for (i = 0; i < hw->aq.num_asq_entries; i++)
247                 if (hw->aq.asq.r.asq_bi[i].pa)
248                         i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
249
250         /* free the buffer info list */
251         i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
252
253         /* free the descriptor memory */
254         i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
255
256         /* free the dma header */
257         i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
258 }
259
260 /**
261  *  i40e_config_asq_regs - configure ASQ registers
262  *  @hw: pointer to the hardware structure
263  *
264  *  Configure base address and length registers for the transmit queue
265  **/
266 static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
267 {
268         i40e_status ret_code = 0;
269         u32 reg = 0;
270
271         /* Clear Head and Tail */
272         wr32(hw, hw->aq.asq.head, 0);
273         wr32(hw, hw->aq.asq.tail, 0);
274
275         /* set starting point */
276         wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
277                                   I40E_VF_ATQLEN1_ATQENABLE_MASK));
278         wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
279         wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
280
281         /* Check one register to verify that config was applied */
282         reg = rd32(hw, hw->aq.asq.bal);
283         if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
284                 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
285
286         return ret_code;
287 }
288
289 /**
290  *  i40e_config_arq_regs - ARQ register configuration
291  *  @hw: pointer to the hardware structure
292  *
293  * Configure base address and length registers for the receive (event queue)
294  **/
295 static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
296 {
297         i40e_status ret_code = 0;
298         u32 reg = 0;
299
300         /* Clear Head and Tail */
301         wr32(hw, hw->aq.arq.head, 0);
302         wr32(hw, hw->aq.arq.tail, 0);
303
304         /* set starting point */
305         wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
306                                   I40E_VF_ARQLEN1_ARQENABLE_MASK));
307         wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
308         wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
309
310         /* Update tail in the HW to post pre-allocated buffers */
311         wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
312
313         /* Check one register to verify that config was applied */
314         reg = rd32(hw, hw->aq.arq.bal);
315         if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
316                 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
317
318         return ret_code;
319 }
320
321 /**
322  *  i40e_init_asq - main initialization routine for ASQ
323  *  @hw: pointer to the hardware structure
324  *
325  *  This is the main initialization routine for the Admin Send Queue
326  *  Prior to calling this function, drivers *MUST* set the following fields
327  *  in the hw->aq structure:
328  *     - hw->aq.num_asq_entries
329  *     - hw->aq.asq_buf_size
330  *
331  *  Do *NOT* hold the lock when calling this as the memory allocation routines
332  *  called are not going to be atomic context safe
333  **/
334 static i40e_status i40e_init_asq(struct i40e_hw *hw)
335 {
336         i40e_status ret_code = 0;
337
338         if (hw->aq.asq.count > 0) {
339                 /* queue already initialized */
340                 ret_code = I40E_ERR_NOT_READY;
341                 goto init_adminq_exit;
342         }
343
344         /* verify input for valid configuration */
345         if ((hw->aq.num_asq_entries == 0) ||
346             (hw->aq.asq_buf_size == 0)) {
347                 ret_code = I40E_ERR_CONFIG;
348                 goto init_adminq_exit;
349         }
350
351         hw->aq.asq.next_to_use = 0;
352         hw->aq.asq.next_to_clean = 0;
353
354         /* allocate the ring memory */
355         ret_code = i40e_alloc_adminq_asq_ring(hw);
356         if (ret_code)
357                 goto init_adminq_exit;
358
359         /* allocate buffers in the rings */
360         ret_code = i40e_alloc_asq_bufs(hw);
361         if (ret_code)
362                 goto init_adminq_free_rings;
363
364         /* initialize base registers */
365         ret_code = i40e_config_asq_regs(hw);
366         if (ret_code)
367                 goto init_adminq_free_rings;
368
369         /* success! */
370         hw->aq.asq.count = hw->aq.num_asq_entries;
371         goto init_adminq_exit;
372
373 init_adminq_free_rings:
374         i40e_free_adminq_asq(hw);
375
376 init_adminq_exit:
377         return ret_code;
378 }
379
380 /**
381  *  i40e_init_arq - initialize ARQ
382  *  @hw: pointer to the hardware structure
383  *
384  *  The main initialization routine for the Admin Receive (Event) Queue.
385  *  Prior to calling this function, drivers *MUST* set the following fields
386  *  in the hw->aq structure:
387  *     - hw->aq.num_arq_entries
388  *     - hw->aq.arq_buf_size
389  *
390  *  Do *NOT* hold the lock when calling this as the memory allocation routines
391  *  called are not going to be atomic context safe
392  **/
393 static i40e_status i40e_init_arq(struct i40e_hw *hw)
394 {
395         i40e_status ret_code = 0;
396
397         if (hw->aq.arq.count > 0) {
398                 /* queue already initialized */
399                 ret_code = I40E_ERR_NOT_READY;
400                 goto init_adminq_exit;
401         }
402
403         /* verify input for valid configuration */
404         if ((hw->aq.num_arq_entries == 0) ||
405             (hw->aq.arq_buf_size == 0)) {
406                 ret_code = I40E_ERR_CONFIG;
407                 goto init_adminq_exit;
408         }
409
410         hw->aq.arq.next_to_use = 0;
411         hw->aq.arq.next_to_clean = 0;
412
413         /* allocate the ring memory */
414         ret_code = i40e_alloc_adminq_arq_ring(hw);
415         if (ret_code)
416                 goto init_adminq_exit;
417
418         /* allocate buffers in the rings */
419         ret_code = i40e_alloc_arq_bufs(hw);
420         if (ret_code)
421                 goto init_adminq_free_rings;
422
423         /* initialize base registers */
424         ret_code = i40e_config_arq_regs(hw);
425         if (ret_code)
426                 goto init_adminq_free_rings;
427
428         /* success! */
429         hw->aq.arq.count = hw->aq.num_arq_entries;
430         goto init_adminq_exit;
431
432 init_adminq_free_rings:
433         i40e_free_adminq_arq(hw);
434
435 init_adminq_exit:
436         return ret_code;
437 }
438
439 /**
440  *  i40e_shutdown_asq - shutdown the ASQ
441  *  @hw: pointer to the hardware structure
442  *
443  *  The main shutdown routine for the Admin Send Queue
444  **/
445 static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
446 {
447         i40e_status ret_code = 0;
448
449         mutex_lock(&hw->aq.asq_mutex);
450
451         if (hw->aq.asq.count == 0) {
452                 ret_code = I40E_ERR_NOT_READY;
453                 goto shutdown_asq_out;
454         }
455
456         /* Stop firmware AdminQ processing */
457         wr32(hw, hw->aq.asq.head, 0);
458         wr32(hw, hw->aq.asq.tail, 0);
459         wr32(hw, hw->aq.asq.len, 0);
460         wr32(hw, hw->aq.asq.bal, 0);
461         wr32(hw, hw->aq.asq.bah, 0);
462
463         hw->aq.asq.count = 0; /* to indicate uninitialized queue */
464
465         /* free ring buffers */
466         i40e_free_asq_bufs(hw);
467
468 shutdown_asq_out:
469         mutex_unlock(&hw->aq.asq_mutex);
470         return ret_code;
471 }
472
473 /**
474  *  i40e_shutdown_arq - shutdown ARQ
475  *  @hw: pointer to the hardware structure
476  *
477  *  The main shutdown routine for the Admin Receive Queue
478  **/
479 static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
480 {
481         i40e_status ret_code = 0;
482
483         mutex_lock(&hw->aq.arq_mutex);
484
485         if (hw->aq.arq.count == 0) {
486                 ret_code = I40E_ERR_NOT_READY;
487                 goto shutdown_arq_out;
488         }
489
490         /* Stop firmware AdminQ processing */
491         wr32(hw, hw->aq.arq.head, 0);
492         wr32(hw, hw->aq.arq.tail, 0);
493         wr32(hw, hw->aq.arq.len, 0);
494         wr32(hw, hw->aq.arq.bal, 0);
495         wr32(hw, hw->aq.arq.bah, 0);
496
497         hw->aq.arq.count = 0; /* to indicate uninitialized queue */
498
499         /* free ring buffers */
500         i40e_free_arq_bufs(hw);
501
502 shutdown_arq_out:
503         mutex_unlock(&hw->aq.arq_mutex);
504         return ret_code;
505 }
506
507 /**
508  *  i40evf_init_adminq - main initialization routine for Admin Queue
509  *  @hw: pointer to the hardware structure
510  *
511  *  Prior to calling this function, drivers *MUST* set the following fields
512  *  in the hw->aq structure:
513  *     - hw->aq.num_asq_entries
514  *     - hw->aq.num_arq_entries
515  *     - hw->aq.arq_buf_size
516  *     - hw->aq.asq_buf_size
517  **/
518 i40e_status i40evf_init_adminq(struct i40e_hw *hw)
519 {
520         i40e_status ret_code;
521
522         /* verify input for valid configuration */
523         if ((hw->aq.num_arq_entries == 0) ||
524             (hw->aq.num_asq_entries == 0) ||
525             (hw->aq.arq_buf_size == 0) ||
526             (hw->aq.asq_buf_size == 0)) {
527                 ret_code = I40E_ERR_CONFIG;
528                 goto init_adminq_exit;
529         }
530
531         /* Set up register offsets */
532         i40e_adminq_init_regs(hw);
533
534         /* setup ASQ command write back timeout */
535         hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
536
537         /* allocate the ASQ */
538         ret_code = i40e_init_asq(hw);
539         if (ret_code)
540                 goto init_adminq_destroy_locks;
541
542         /* allocate the ARQ */
543         ret_code = i40e_init_arq(hw);
544         if (ret_code)
545                 goto init_adminq_free_asq;
546
547         /* success! */
548         goto init_adminq_exit;
549
550 init_adminq_free_asq:
551         i40e_shutdown_asq(hw);
552 init_adminq_destroy_locks:
553
554 init_adminq_exit:
555         return ret_code;
556 }
557
558 /**
559  *  i40evf_shutdown_adminq - shutdown routine for the Admin Queue
560  *  @hw: pointer to the hardware structure
561  **/
562 i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
563 {
564         i40e_status ret_code = 0;
565
566         if (i40evf_check_asq_alive(hw))
567                 i40evf_aq_queue_shutdown(hw, true);
568
569         i40e_shutdown_asq(hw);
570         i40e_shutdown_arq(hw);
571
572         if (hw->nvm_buff.va)
573                 i40e_free_virt_mem(hw, &hw->nvm_buff);
574
575         return ret_code;
576 }
577
578 /**
579  *  i40e_clean_asq - cleans Admin send queue
580  *  @hw: pointer to the hardware structure
581  *
582  *  returns the number of free desc
583  **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
	struct i40e_adminq_ring *asq = &(hw->aq.asq);
	struct i40e_asq_cmd_details *details;
	u16 ntc = asq->next_to_clean;
	struct i40e_aq_desc desc_cb;
	struct i40e_aq_desc *desc;

	desc = I40E_ADMINQ_DESC(*asq, ntc);
	details = I40E_ADMINQ_DETAILS(*asq, ntc);
	/* the HW head register marks how far the FW has consumed; walk
	 * from next_to_clean up to (but not including) that point
	 */
	while (rd32(hw, hw->aq.asq.head) != ntc) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));

		if (details->callback) {
			I40E_ADMINQ_CALLBACK cb_func =
					(I40E_ADMINQ_CALLBACK)details->callback;
			/* invoke the callback on a stack copy so the ring
			 * descriptor can be cleared below regardless of
			 * what the callback does with it
			 */
			desc_cb = *desc;
			cb_func(hw, &desc_cb);
		}
		memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
		memset((void *)details, 0,
		       sizeof(struct i40e_asq_cmd_details));
		/* advance with wraparound at the ring boundary */
		ntc++;
		if (ntc == asq->count)
			ntc = 0;
		desc = I40E_ADMINQ_DESC(*asq, ntc);
		details = I40E_ADMINQ_DETAILS(*asq, ntc);
	}

	asq->next_to_clean = ntc;

	/* report how many descriptors are now free for new commands */
	return I40E_DESC_UNUSED(asq);
}
618
619 /**
620  *  i40evf_asq_done - check if FW has processed the Admin Send Queue
621  *  @hw: pointer to the hw struct
622  *
623  *  Returns true if the firmware has processed all descriptors on the
624  *  admin send queue. Returns false if there are still requests pending.
625  **/
bool i40evf_asq_done(struct i40e_hw *hw)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	/* when the FW's head pointer has caught up with our next_to_use,
	 * every posted descriptor has been consumed
	 */
	return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;

}
634
635 /**
636  *  i40evf_asq_send_command - send command to Admin Queue
637  *  @hw: pointer to the hw struct
638  *  @desc: prefilled descriptor describing the command (non DMA mem)
639  *  @buff: buffer to use for indirect commands
640  *  @buff_size: size of buffer for indirect commands
641  *  @cmd_details: pointer to command details structure
642  *
643  *  This is the main send command driver routine for the Admin Queue send
644  *  queue.  It runs the queue, cleans the queue, etc
645  **/
i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
				struct i40e_aq_desc *desc,
				void *buff, /* can be NULL */
				u16  buff_size,
				struct i40e_asq_cmd_details *cmd_details)
{
	i40e_status status = 0;
	struct i40e_dma_mem *dma_buff = NULL;
	struct i40e_asq_cmd_details *details;
	struct i40e_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	u16  retval = 0;
	u32  val = 0;

	/* serialize all send-queue activity */
	mutex_lock(&hw->aq.asq_mutex);

	if (hw->aq.asq.count == 0) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Admin queue not initialized.\n");
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	hw->aq.asq_last_status = I40E_AQ_RC_OK;

	/* sanity-check the HW head; a value past the ring size indicates
	 * the queue registers are in a bad state
	 */
	val = rd32(hw, hw->aq.asq.head);
	if (val >= hw->aq.num_asq_entries) {
		i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: head overrun at %d\n", val);
		status = I40E_ERR_QUEUE_EMPTY;
		goto asq_send_command_error;
	}

	details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
	if (cmd_details) {
		*details = *cmd_details;

		/* If the cmd_details are defined copy the cookie.  The
		 * cpu_to_le32 is not needed here because the data is ignored
		 * by the FW, only used by the driver
		 */
		if (details->cookie) {
			desc->cookie_high =
				cpu_to_le32(upper_32_bits(details->cookie));
			desc->cookie_low =
				cpu_to_le32(lower_32_bits(details->cookie));
		}
	} else {
		memset(details, 0, sizeof(struct i40e_asq_cmd_details));
	}

	/* clear requested flags and then set additional flags if defined */
	desc->flags &= ~cpu_to_le16(details->flags_dis);
	desc->flags |= cpu_to_le16(details->flags_ena);

	if (buff_size > hw->aq.asq_buf_size) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Invalid buffer size: %d.\n",
			   buff_size);
		status = I40E_ERR_INVALID_SIZE;
		goto asq_send_command_error;
	}

	/* postpone only makes sense for asynchronous commands */
	if (details->postpone && !details->async) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Async flag not set along with postpone flag");
		status = I40E_ERR_PARAM;
		goto asq_send_command_error;
	}

	/* call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW, the function returns the
	 * number of desc available
	 */
	/* the clean function called here could be called in a separate thread
	 * in case of asynchronous completions
	 */
	if (i40e_clean_asq(hw) == 0) {
		i40e_debug(hw,
			   I40E_DEBUG_AQ_MESSAGE,
			   "AQTX: Error queue is full.\n");
		status = I40E_ERR_ADMIN_QUEUE_FULL;
		goto asq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	*desc_on_ring = *desc;

	/* if buff is not NULL assume indirect command */
	if (buff != NULL) {
		dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
		/* copy the user buff into the respective DMA buff */
		memcpy(dma_buff->va, buff, buff_size);
		desc_on_ring->datalen = cpu_to_le16(buff_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.external.addr_high =
				cpu_to_le32(upper_32_bits(dma_buff->pa));
		desc_on_ring->params.external.addr_low =
				cpu_to_le32(lower_32_bits(dma_buff->pa));
	}

	/* bump the tail */
	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
			buff, buff_size);
	(hw->aq.asq.next_to_use)++;
	if (hw->aq.asq.next_to_use == hw->aq.asq.count)
		hw->aq.asq.next_to_use = 0;
	if (!details->postpone)
		wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);

	/* if cmd_details are not defined or async flag is not set,
	 * we need to wait for desc write back
	 */
	if (!details->async && !details->postpone) {
		u32 total_delay = 0;

		do {
			/* AQ designers suggest use of head for better
			 * timing reliability than DD bit
			 */
			if (i40evf_asq_done(hw))
				break;
			udelay(50);
			total_delay += 50;
		} while (total_delay < hw->aq.asq_cmd_timeout);
	}

	/* if ready, copy the desc back to temp */
	if (i40evf_asq_done(hw)) {
		*desc = *desc_on_ring;
		if (buff != NULL)
			memcpy(buff, dma_buff->va, buff_size);
		retval = le16_to_cpu(desc->retval);
		if (retval != 0) {
			i40e_debug(hw,
				   I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Command completed with error 0x%X.\n",
				   retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		/* map the FW return code onto a driver status code */
		if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
			status = 0;
		else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
			status = I40E_ERR_NOT_READY;
		else
			status = I40E_ERR_ADMIN_QUEUE_ERROR;
		hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
	}

	i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
		   "AQTX: desc and buffer writeback:\n");
	i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff,
			buff_size);

	/* save writeback aq if requested */
	if (details->wb_desc)
		*details->wb_desc = *desc_on_ring;

	/* update the error if time out occurred */
	if ((!cmd_completed) &&
	    (!details->async && !details->postpone)) {
		if (rd32(hw, hw->aq.asq.len) & I40E_VF_ATQLEN1_ATQCRIT_MASK) {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: AQ Critical error.\n");
			status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
		} else {
			i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
				   "AQTX: Writeback timeout.\n");
			status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
		}
	}

asq_send_command_error:
	mutex_unlock(&hw->aq.asq_mutex);
	return status;
}
834
835 /**
836  *  i40evf_fill_default_direct_cmd_desc - AQ descriptor helper function
837  *  @desc:     pointer to the temp descriptor (non DMA mem)
838  *  @opcode:   the opcode can be used to decide which flags to turn off or on
839  *
840  *  Fill the desc with default values
841  **/
842 void i40evf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
843                                        u16 opcode)
844 {
845         /* zero out the desc */
846         memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
847         desc->opcode = cpu_to_le16(opcode);
848         desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
849 }
850
851 /**
852  *  i40evf_clean_arq_element
853  *  @hw: pointer to the hw struct
854  *  @e: event info from the receive descriptor, includes any buffers
855  *  @pending: number of events that could be left to process
856  *
857  *  This function cleans one Admin Receive Queue element and returns
858  *  the contents through e.  It can also return how many events are
859  *  left to process through 'pending'
860  **/
861 i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
862                                              struct i40e_arq_event_info *e,
863                                              u16 *pending)
864 {
865         i40e_status ret_code = 0;
866         u16 ntc = hw->aq.arq.next_to_clean;
867         struct i40e_aq_desc *desc;
868         struct i40e_dma_mem *bi;
869         u16 desc_idx;
870         u16 datalen;
871         u16 flags;
872         u16 ntu;
873
874         /* pre-clean the event info */
875         memset(&e->desc, 0, sizeof(e->desc));
876
877         /* take the lock before we start messing with the ring */
878         mutex_lock(&hw->aq.arq_mutex);
879
880         if (hw->aq.arq.count == 0) {
881                 i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
882                            "AQRX: Admin queue not initialized.\n");
883                 ret_code = I40E_ERR_QUEUE_EMPTY;
884                 goto clean_arq_element_err;
885         }
886
887         /* set next_to_use to head */
888         ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
889         if (ntu == ntc) {
890                 /* nothing to do - shouldn't need to update ring's values */
891                 ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
892                 goto clean_arq_element_out;
893         }
894
895         /* now clean the next descriptor */
896         desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
897         desc_idx = ntc;
898
899         hw->aq.arq_last_status =
900                 (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
901         flags = le16_to_cpu(desc->flags);
902         if (flags & I40E_AQ_FLAG_ERR) {
903                 ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
904                 i40e_debug(hw,
905                            I40E_DEBUG_AQ_MESSAGE,
906                            "AQRX: Event received with error 0x%X.\n",
907                            hw->aq.arq_last_status);
908         }
909
910         e->desc = *desc;
911         datalen = le16_to_cpu(desc->datalen);
912         e->msg_len = min(datalen, e->buf_len);
913         if (e->msg_buf != NULL && (e->msg_len != 0))
914                 memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
915                        e->msg_len);
916
917         i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
918         i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
919                         hw->aq.arq_buf_size);
920
921         /* Restore the original datalen and buffer address in the desc,
922          * FW updates datalen to indicate the event message
923          * size
924          */
925         bi = &hw->aq.arq.r.arq_bi[ntc];
926         memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
927
928         desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
929         if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
930                 desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
931         desc->datalen = cpu_to_le16((u16)bi->size);
932         desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
933         desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
934
935         /* set tail = the last cleaned desc index. */
936         wr32(hw, hw->aq.arq.tail, ntc);
937         /* ntc is updated to tail + 1 */
938         ntc++;
939         if (ntc == hw->aq.num_arq_entries)
940                 ntc = 0;
941         hw->aq.arq.next_to_clean = ntc;
942         hw->aq.arq.next_to_use = ntu;
943
944 clean_arq_element_out:
945         /* Set pending if needed, unlock and return */
946         if (pending != NULL)
947                 *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
948
949 clean_arq_element_err:
950         mutex_unlock(&hw->aq.arq_mutex);
951
952         return ret_code;
953 }
954
/**
 *  i40evf_resume_aq - resume Admin Queue processing after a reset
 *  @hw: pointer to the hardware structure
 *
 *  Resets the software ring indices for both the send and receive
 *  admin queues and re-programs the queue registers, since the
 *  hardware register state is lost across a PF reset.
 **/
void i40evf_resume_aq(struct i40e_hw *hw)
{
	/* Registers are reset after PF reset */
	hw->aq.asq.next_to_use = 0;
	hw->aq.asq.next_to_clean = 0;

	i40e_config_asq_regs(hw);

	hw->aq.arq.next_to_use = 0;
	hw->aq.arq.next_to_clean = 0;

	i40e_config_arq_regs(hw);
}