1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "ena_com.h"
34
35 /*****************************************************************************/
36 /*****************************************************************************/
37
38 /* Timeout in micro-sec */
39 #define ADMIN_CMD_TIMEOUT_US (3000000)
40
41 #define ENA_ASYNC_QUEUE_DEPTH 16
42 #define ENA_ADMIN_QUEUE_DEPTH 32
43
44 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
45                 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
46                 | (ENA_COMMON_SPEC_VERSION_MINOR))
47
48 #define ENA_CTRL_MAJOR          0
49 #define ENA_CTRL_MINOR          0
50 #define ENA_CTRL_SUB_MINOR      1
51
52 #define MIN_ENA_CTRL_VER \
53         (((ENA_CTRL_MAJOR) << \
54         (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
55         ((ENA_CTRL_MINOR) << \
56         (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
57         (ENA_CTRL_SUB_MINOR))
58
59 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
60 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))
61
62 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
63
64 #define ENA_REGS_ADMIN_INTR_MASK 1
65
66 /*****************************************************************************/
67 /*****************************************************************************/
68 /*****************************************************************************/
69
70 enum ena_cmd_status {
71         ENA_CMD_SUBMITTED,
72         ENA_CMD_COMPLETED,
73         /* Abort - canceled by the driver */
74         ENA_CMD_ABORTED,
75 };
76
77 struct ena_comp_ctx {
78         struct completion wait_event;
79         struct ena_admin_acq_entry *user_cqe;
80         u32 comp_size;
81         enum ena_cmd_status status;
82         /* status from the device */
83         u8 comp_status;
84         u8 cmd_opcode;
85         bool occupied;
86 };
87
88 struct ena_com_stats_ctx {
89         struct ena_admin_aq_get_stats_cmd get_cmd;
90         struct ena_admin_acq_get_stats_resp get_resp;
91 };
92
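/* Set a DMA address in an ena_common_mem_addr descriptor field,
 * splitting it into low/high 32-bit halves. Fails if the address does
 * not fit within the device's supported DMA address width.
 */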
93 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
94                                        struct ena_common_mem_addr *ena_addr,
95                                        dma_addr_t addr)
96 {
97         if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
98                 pr_err("dma address has more bits than the device supports\n");
99                 return -EINVAL;
100         }
101
102         ena_addr->mem_addr_low = (u32)addr;
103         ena_addr->mem_addr_high = (u64)addr >> 32;
104
105         return 0;
106 }
107
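/* Allocate the admin submission queue ring in coherent DMA memory and
 * reset its head, tail and phase. The doorbell address is cleared here
 * and is expected to be set up later by the caller.
 */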
108 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
109 {
110         struct ena_com_admin_sq *sq = &queue->sq;
111         u16 size = ADMIN_SQ_SIZE(queue->q_depth);
112
113         sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
114                                           GFP_KERNEL);
115
116         if (!sq->entries) {
117                 pr_err("memory allocation failed\n");
118                 return -ENOMEM;
119         }
120
121         sq->head = 0;
122         sq->tail = 0;
123         sq->phase = 1;
124
125         sq->db_addr = NULL;
126
127         return 0;
128 }
129
130 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
131 {
132         struct ena_com_admin_cq *cq = &queue->cq;
133         u16 size = ADMIN_CQ_SIZE(queue->q_depth);
134
135         cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
136                                           GFP_KERNEL);
137
138         if (!cq->entries) {
139                 pr_err("memory allocation failed\n");
140                 return -ENOMEM;
141         }
142
143         cq->head = 0;
144         cq->phase = 1;
145
146         return 0;
147 }
148
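/* Allocate the asynchronous event notification queue (AENQ), program
 * its base address and capabilities (depth and entry size) into the
 * device registers, and register the caller-supplied event handlers.
 */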
149 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
150                                    struct ena_aenq_handlers *aenq_handlers)
151 {
152         struct ena_com_aenq *aenq = &dev->aenq;
153         u32 addr_low, addr_high, aenq_caps;
154         u16 size;
155
156         dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
157         size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
158         aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
159                                             GFP_KERNEL);
160
161         if (!aenq->entries) {
162                 pr_err("memory allocation failed\n");
163                 return -ENOMEM;
164         }
165
166         aenq->head = aenq->q_depth;
167         aenq->phase = 1;
168
169         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
170         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
171
172         writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
173         writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
174
175         aenq_caps = 0;
176         aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
177         aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
178                       << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
179                      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
180         writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
181
182         if (unlikely(!aenq_handlers)) {
183                 pr_err("aenq handlers pointer is NULL\n");
184                 return -EINVAL;
185         }
186
187         aenq->aenq_handlers = aenq_handlers;
188
189         return 0;
190 }
191
192 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
193                                      struct ena_comp_ctx *comp_ctx)
194 {
195         comp_ctx->occupied = false;
196         atomic_dec(&queue->outstanding_cmds);
197 }
198
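/* Look up the completion context for a given command id. When @capture
 * is true the context is marked as occupied and the outstanding command
 * counter is incremented; it must later be released via comp_ctxt_release.
 */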
199 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
200                                           u16 command_id, bool capture)
201 {
202         if (unlikely(!queue->comp_ctx)) {
203                 pr_err("Completion context is NULL\n");
204                 return NULL;
205         }
206
207         if (unlikely(command_id >= queue->q_depth)) {
208                 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
209                        command_id, queue->q_depth);
210                 return NULL;
211         }
212
213         if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
214                 pr_err("Completion context is occupied\n");
215                 return NULL;
216         }
217
218         if (capture) {
219                 atomic_inc(&queue->outstanding_cmds);
220                 queue->comp_ctx[command_id].occupied = true;
221         }
222
223         return &queue->comp_ctx[command_id];
224 }
225
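/* Push a single admin command into the submission queue. Must be called
 * with the admin queue lock held. Stamps the current phase and command id
 * into the descriptor, copies it to the ring, bumps the tail (flipping the
 * phase on wrap-around) and rings the SQ doorbell. Returns the completion
 * context used to track the command, or an ERR_PTR on failure.
 */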
226 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
227                                                        struct ena_admin_aq_entry *cmd,
228                                                        size_t cmd_size_in_bytes,
229                                                        struct ena_admin_acq_entry *comp,
230                                                        size_t comp_size_in_bytes)
231 {
232         struct ena_comp_ctx *comp_ctx;
233         u16 tail_masked, cmd_id;
234         u16 queue_size_mask;
235         u16 cnt;
236
237         queue_size_mask = admin_queue->q_depth - 1;
238
239         tail_masked = admin_queue->sq.tail & queue_size_mask;
240
241         /* In case of queue FULL */
242         cnt = atomic_read(&admin_queue->outstanding_cmds);
243         if (cnt >= admin_queue->q_depth) {
244                 pr_debug("admin queue is full.\n");
245                 admin_queue->stats.out_of_space++;
246                 return ERR_PTR(-ENOSPC);
247         }
248
249         cmd_id = admin_queue->curr_cmd_id;
250
251         cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
252                 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
253
254         cmd->aq_common_descriptor.command_id |= cmd_id &
255                 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
256
257         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
258         if (unlikely(!comp_ctx))
259                 return ERR_PTR(-EINVAL);
260
261         comp_ctx->status = ENA_CMD_SUBMITTED;
262         comp_ctx->comp_size = (u32)comp_size_in_bytes;
263         comp_ctx->user_cqe = comp;
264         comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
265
266         reinit_completion(&comp_ctx->wait_event);
267
268         memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
269
270         admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
271                 queue_size_mask;
272
273         admin_queue->sq.tail++;
274         admin_queue->stats.submitted_cmd++;
275
276         if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
277                 admin_queue->sq.phase = !admin_queue->sq.phase;
278
279         writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
280
281         return comp_ctx;
282 }
283
284 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
285 {
286         size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
287         struct ena_comp_ctx *comp_ctx;
288         u16 i;
289
290         queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
291         if (unlikely(!queue->comp_ctx)) {
292                 pr_err("memory allocation failed\n");
293                 return -ENOMEM;
294         }
295
296         for (i = 0; i < queue->q_depth; i++) {
297                 comp_ctx = get_comp_ctxt(queue, i, false);
298                 if (comp_ctx)
299                         init_completion(&comp_ctx->wait_event);
300         }
301
302         return 0;
303 }
304
305 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
306                                                      struct ena_admin_aq_entry *cmd,
307                                                      size_t cmd_size_in_bytes,
308                                                      struct ena_admin_acq_entry *comp,
309                                                      size_t comp_size_in_bytes)
310 {
311         unsigned long flags;
312         struct ena_comp_ctx *comp_ctx;
313
314         spin_lock_irqsave(&admin_queue->q_lock, flags);
315         if (unlikely(!admin_queue->running_state)) {
316                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
317                 return ERR_PTR(-ENODEV);
318         }
319         comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
320                                               cmd_size_in_bytes,
321                                               comp,
322                                               comp_size_in_bytes);
323         if (unlikely(IS_ERR(comp_ctx)))
324                 admin_queue->running_state = false;
325         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
326
327         return comp_ctx;
328 }
329
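/* Allocate the descriptor ring for an IO submission queue: coherent DMA
 * memory for host placement, a plain devm_kzalloc buffer otherwise. The
 * allocation is tried on the requested NUMA node first and retried on
 * the device's original node if that fails.
 */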
330 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
331                               struct ena_com_create_io_ctx *ctx,
332                               struct ena_com_io_sq *io_sq)
333 {
334         size_t size;
335         int dev_node = 0;
336
337         memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
338
339         io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
340         io_sq->desc_entry_size =
341                 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
342                 sizeof(struct ena_eth_io_tx_desc) :
343                 sizeof(struct ena_eth_io_rx_desc);
344
345         size = io_sq->desc_entry_size * io_sq->q_depth;
346
347         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
348                 dev_node = dev_to_node(ena_dev->dmadev);
349                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
350                 io_sq->desc_addr.virt_addr =
351                         dma_zalloc_coherent(ena_dev->dmadev, size,
352                                             &io_sq->desc_addr.phys_addr,
353                                             GFP_KERNEL);
354                 set_dev_node(ena_dev->dmadev, dev_node);
355                 if (!io_sq->desc_addr.virt_addr) {
356                         io_sq->desc_addr.virt_addr =
357                                 dma_zalloc_coherent(ena_dev->dmadev, size,
358                                                     &io_sq->desc_addr.phys_addr,
359                                                     GFP_KERNEL);
360                 }
361         } else {
362                 dev_node = dev_to_node(ena_dev->dmadev);
363                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
364                 io_sq->desc_addr.virt_addr =
365                         devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
366                 set_dev_node(ena_dev->dmadev, dev_node);
367                 if (!io_sq->desc_addr.virt_addr) {
368                         io_sq->desc_addr.virt_addr =
369                                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
370                 }
371         }
372
373         if (!io_sq->desc_addr.virt_addr) {
374                 pr_err("memory allocation failed\n");
375                 return -ENOMEM;
376         }
377
378         io_sq->tail = 0;
379         io_sq->next_to_comp = 0;
380         io_sq->phase = 1;
381
382         return 0;
383 }
384
385 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
386                               struct ena_com_create_io_ctx *ctx,
387                               struct ena_com_io_cq *io_cq)
388 {
389         size_t size;
390         int prev_node = 0;
391
392         memset(&io_cq->cdesc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
393
394         /* Use the basic completion descriptor for Rx */
395         io_cq->cdesc_entry_size_in_bytes =
396                 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
397                 sizeof(struct ena_eth_io_tx_cdesc) :
398                 sizeof(struct ena_eth_io_rx_cdesc_base);
399
400         size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
401
402         prev_node = dev_to_node(ena_dev->dmadev);
403         set_dev_node(ena_dev->dmadev, ctx->numa_node);
404         io_cq->cdesc_addr.virt_addr =
405                 dma_zalloc_coherent(ena_dev->dmadev, size,
406                                     &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
407         set_dev_node(ena_dev->dmadev, prev_node);
408         if (!io_cq->cdesc_addr.virt_addr) {
409                 io_cq->cdesc_addr.virt_addr =
410                         dma_zalloc_coherent(ena_dev->dmadev, size,
411                                             &io_cq->cdesc_addr.phys_addr,
412                                             GFP_KERNEL);
413         }
414
415         if (!io_cq->cdesc_addr.virt_addr) {
416                 pr_err("memory allocation failed\n");
417                 return -ENOMEM;
418         }
419
420         io_cq->phase = 1;
421         io_cq->head = 0;
422
423         return 0;
424 }
425
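/* Process one admin completion entry: match it to its completion context
 * by command id, record the completion status, copy the entry to the
 * caller's buffer if one was provided, and wake the waiter when the
 * queue is in interrupt (non-polling) mode.
 */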
426 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
427                                                    struct ena_admin_acq_entry *cqe)
428 {
429         struct ena_comp_ctx *comp_ctx;
430         u16 cmd_id;
431
432         cmd_id = cqe->acq_common_descriptor.command &
433                 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
434
435         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
436         if (unlikely(!comp_ctx)) {
437                 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
438                 admin_queue->running_state = false;
439                 return;
440         }
441
442         comp_ctx->status = ENA_CMD_COMPLETED;
443         comp_ctx->comp_status = cqe->acq_common_descriptor.status;
444
445         if (comp_ctx->user_cqe)
446                 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
447
448         if (!admin_queue->polling)
449                 complete(&comp_ctx->wait_event);
450 }
451
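/* Walk the admin completion queue and handle every entry whose phase bit
 * matches the current phase, flipping the phase on wrap-around, then
 * advance the CQ/SQ head counters and the completion statistics.
 */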
452 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
453 {
454         struct ena_admin_acq_entry *cqe = NULL;
455         u16 comp_num = 0;
456         u16 head_masked;
457         u8 phase;
458
459         head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
460         phase = admin_queue->cq.phase;
461
462         cqe = &admin_queue->cq.entries[head_masked];
463
464         /* Go over all the completions */
465         while ((cqe->acq_common_descriptor.flags &
466                         ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
467                 /* Do not read the rest of the completion entry before the
 468                  * phase bit has been validated
469                  */
470                 rmb();
471                 ena_com_handle_single_admin_completion(admin_queue, cqe);
472
473                 head_masked++;
474                 comp_num++;
475                 if (unlikely(head_masked == admin_queue->q_depth)) {
476                         head_masked = 0;
477                         phase = !phase;
478                 }
479
480                 cqe = &admin_queue->cq.entries[head_masked];
481         }
482
483         admin_queue->cq.head += comp_num;
484         admin_queue->cq.phase = phase;
485         admin_queue->sq.head += comp_num;
486         admin_queue->stats.completed_cmd += comp_num;
487 }
488
489 static int ena_com_comp_status_to_errno(u8 comp_status)
490 {
491         if (unlikely(comp_status != 0))
492                 pr_err("admin command failed[%u]\n", comp_status);
493
494         if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
495                 return -EINVAL;
496
497         switch (comp_status) {
498         case ENA_ADMIN_SUCCESS:
499                 return 0;
500         case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
501                 return -ENOMEM;
502         case ENA_ADMIN_UNSUPPORTED_OPCODE:
503                 return -EPERM;
504         case ENA_ADMIN_BAD_OPCODE:
505         case ENA_ADMIN_MALFORMED_REQUEST:
506         case ENA_ADMIN_ILLEGAL_PARAMETER:
507         case ENA_ADMIN_UNKNOWN_ERROR:
508                 return -EINVAL;
509         }
510
511         return 0;
512 }
513
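/* Poll for an admin command completion: repeatedly reap the completion
 * queue under the queue lock until the command leaves the SUBMITTED state
 * or the admin command timeout expires, then translate the device status
 * into an errno and release the completion context.
 */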
514 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
515                                                      struct ena_com_admin_queue *admin_queue)
516 {
517         unsigned long flags, timeout;
518         int ret;
519
520         timeout = jiffies + usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US);
521
522         while (1) {
523                 spin_lock_irqsave(&admin_queue->q_lock, flags);
524                 ena_com_handle_admin_completion(admin_queue);
525                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
526
527                 if (comp_ctx->status != ENA_CMD_SUBMITTED)
528                         break;
529
530                 if (time_is_before_jiffies(timeout)) {
531                         pr_err("Wait for completion (polling) timeout\n");
532                         /* ENA didn't have any completion */
533                         spin_lock_irqsave(&admin_queue->q_lock, flags);
534                         admin_queue->stats.no_completion++;
535                         admin_queue->running_state = false;
536                         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
537
538                         ret = -ETIME;
539                         goto err;
540                 }
541
542                 msleep(100);
543         }
544
545         if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
546                 pr_err("Command was aborted\n");
547                 spin_lock_irqsave(&admin_queue->q_lock, flags);
548                 admin_queue->stats.aborted_cmd++;
549                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
550                 ret = -ENODEV;
551                 goto err;
552         }
553
554         WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
555              comp_ctx->status);
556
557         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
558 err:
559         comp_ctxt_release(admin_queue, comp_ctx);
560         return ret;
561 }
562
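/* Wait for an admin command completion signalled from the interrupt
 * handler. If the wait times out, reap the completion queue once more to
 * distinguish a genuinely missing completion from a missed MSI-X
 * interrupt, and mark the admin queue as not running on failure.
 */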
563 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
564                                                         struct ena_com_admin_queue *admin_queue)
565 {
566         unsigned long flags;
567         int ret;
568
569         wait_for_completion_timeout(&comp_ctx->wait_event,
570                                     usecs_to_jiffies(ADMIN_CMD_TIMEOUT_US));
571
 572         /* In case the command wasn't completed, find out the root cause.
 573          * There might be 2 kinds of errors:
 574          * 1) No completion (timeout reached)
 575          * 2) There is a completion but the driver didn't get any msi-x interrupt.
576          */
577         if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
578                 spin_lock_irqsave(&admin_queue->q_lock, flags);
579                 ena_com_handle_admin_completion(admin_queue);
580                 admin_queue->stats.no_completion++;
581                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
582
583                 if (comp_ctx->status == ENA_CMD_COMPLETED)
 584                         pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
585                                comp_ctx->cmd_opcode);
586                 else
 587                         pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
588                                comp_ctx->cmd_opcode, comp_ctx->status);
589
590                 admin_queue->running_state = false;
591                 ret = -ETIME;
592                 goto err;
593         }
594
595         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
596 err:
597         comp_ctxt_release(admin_queue, comp_ctx);
598         return ret;
599 }
600
601 /* This method reads a hardware device register by posting a write
 602  * and waiting for the response.
 603  * On timeout the function returns ENA_MMIO_READ_TIMEOUT.
604  */
605 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
606 {
607         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
608         volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
609                 mmio_read->read_resp;
610         u32 mmio_read_reg, ret;
611         unsigned long flags;
612         int i;
613
614         might_sleep();
615
616         /* If readless is disabled, perform regular read */
617         if (!mmio_read->readless_supported)
618                 return readl(ena_dev->reg_bar + offset);
619
620         spin_lock_irqsave(&mmio_read->lock, flags);
621         mmio_read->seq_num++;
622
623         read_resp->req_id = mmio_read->seq_num + 0xDEAD;
624         mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
625                         ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
626         mmio_read_reg |= mmio_read->seq_num &
627                         ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
628
 629         /* make sure read_resp->req_id gets updated before the hw can write
 630          * to it
631          */
632         wmb();
633
634         writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
635
636         for (i = 0; i < ENA_REG_READ_TIMEOUT; i++) {
637                 if (read_resp->req_id == mmio_read->seq_num)
638                         break;
639
640                 udelay(1);
641         }
642
643         if (unlikely(i == ENA_REG_READ_TIMEOUT)) {
 644                 pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
645                        mmio_read->seq_num, offset, read_resp->req_id,
646                        read_resp->reg_off);
647                 ret = ENA_MMIO_READ_TIMEOUT;
648                 goto err;
649         }
650
651         if (read_resp->reg_off != offset) {
 652                 pr_err("Read failure: wrong offset provided\n");
653                 ret = ENA_MMIO_READ_TIMEOUT;
654         } else {
655                 ret = read_resp->reg_val;
656         }
657 err:
658         spin_unlock_irqrestore(&mmio_read->lock, flags);
659
660         return ret;
661 }
662
663 /* There are two ways to wait for a completion.
 664  * Polling mode - wait until the completion is available.
 665  * Async mode - wait on a wait queue until the completion is ready
 666  * (or the timeout expires).
 667  * In async mode the IRQ handler is expected to call
 668  * ena_com_handle_admin_completion to mark the completions.
669  */
670 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
671                                              struct ena_com_admin_queue *admin_queue)
672 {
673         if (admin_queue->polling)
674                 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
675                                                                  admin_queue);
676
677         return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
678                                                             admin_queue);
679 }
680
681 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
682                                  struct ena_com_io_sq *io_sq)
683 {
684         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
685         struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
686         struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
687         u8 direction;
688         int ret;
689
690         memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_sq_cmd));
691
692         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
693                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
694         else
695                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
696
697         destroy_cmd.sq.sq_identity |= (direction <<
698                 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
699                 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
700
701         destroy_cmd.sq.sq_idx = io_sq->idx;
702         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
703
704         ret = ena_com_execute_admin_command(admin_queue,
705                                             (struct ena_admin_aq_entry *)&destroy_cmd,
706                                             sizeof(destroy_cmd),
707                                             (struct ena_admin_acq_entry *)&destroy_resp,
708                                             sizeof(destroy_resp));
709
710         if (unlikely(ret && (ret != -ENODEV)))
711                 pr_err("failed to destroy io sq error: %d\n", ret);
712
713         return ret;
714 }
715
716 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
717                                   struct ena_com_io_sq *io_sq,
718                                   struct ena_com_io_cq *io_cq)
719 {
720         size_t size;
721
722         if (io_cq->cdesc_addr.virt_addr) {
723                 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
724
725                 dma_free_coherent(ena_dev->dmadev, size,
726                                   io_cq->cdesc_addr.virt_addr,
727                                   io_cq->cdesc_addr.phys_addr);
728
729                 io_cq->cdesc_addr.virt_addr = NULL;
730         }
731
732         if (io_sq->desc_addr.virt_addr) {
733                 size = io_sq->desc_entry_size * io_sq->q_depth;
734
735                 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
736                         dma_free_coherent(ena_dev->dmadev, size,
737                                           io_sq->desc_addr.virt_addr,
738                                           io_sq->desc_addr.phys_addr);
739                 else
740                         devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
741
742                 io_sq->desc_addr.virt_addr = NULL;
743         }
744 }
745
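/* Poll the device status register until its reset-in-progress bit matches
 * the expected state, checking every 100 ms for up to @timeout iterations.
 * Returns -ETIME on a register read timeout or if the state is not reached.
 */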
746 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
747                                 u16 exp_state)
748 {
749         u32 val, i;
750
751         for (i = 0; i < timeout; i++) {
752                 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
753
754                 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
755                         pr_err("Reg read timeout occurred\n");
756                         return -ETIME;
757                 }
758
759                 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
760                         exp_state)
761                         return 0;
762
763                 /* The resolution of the timeout is 100ms */
764                 msleep(100);
765         }
766
767         return -ETIME;
768 }
769
770 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
771                                                enum ena_admin_aq_feature_id feature_id)
772 {
773         u32 feature_mask = 1 << feature_id;
774
 775         /* Device attributes are always supported */
776         if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
777             !(ena_dev->supported_features & feature_mask))
778                 return false;
779
780         return true;
781 }
782
783 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
784                                   struct ena_admin_get_feat_resp *get_resp,
785                                   enum ena_admin_aq_feature_id feature_id,
786                                   dma_addr_t control_buf_dma_addr,
787                                   u32 control_buff_size)
788 {
789         struct ena_com_admin_queue *admin_queue;
790         struct ena_admin_get_feat_cmd get_cmd;
791         int ret;
792
793         if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
794                 pr_info("Feature %d isn't supported\n", feature_id);
795                 return -EPERM;
796         }
797
798         memset(&get_cmd, 0x0, sizeof(get_cmd));
799         admin_queue = &ena_dev->admin_queue;
800
801         get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
802
803         if (control_buff_size)
804                 get_cmd.aq_common_descriptor.flags =
805                         ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
806         else
807                 get_cmd.aq_common_descriptor.flags = 0;
808
809         ret = ena_com_mem_addr_set(ena_dev,
810                                    &get_cmd.control_buffer.address,
811                                    control_buf_dma_addr);
812         if (unlikely(ret)) {
813                 pr_err("memory address set failed\n");
814                 return ret;
815         }
816
817         get_cmd.control_buffer.length = control_buff_size;
818
819         get_cmd.feat_common.feature_id = feature_id;
820
821         ret = ena_com_execute_admin_command(admin_queue,
822                                             (struct ena_admin_aq_entry *)
823                                             &get_cmd,
824                                             sizeof(get_cmd),
825                                             (struct ena_admin_acq_entry *)
826                                             get_resp,
827                                             sizeof(*get_resp));
828
829         if (unlikely(ret))
830                 pr_err("Failed to submit get_feature command %d error: %d\n",
831                        feature_id, ret);
832
833         return ret;
834 }
835
836 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
837                                struct ena_admin_get_feat_resp *get_resp,
838                                enum ena_admin_aq_feature_id feature_id)
839 {
840         return ena_com_get_feature_ex(ena_dev,
841                                       get_resp,
842                                       feature_id,
843                                       0,
844                                       0);
845 }
846
847 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
848 {
849         struct ena_admin_feature_rss_flow_hash_control *hash_key =
850                 (ena_dev->rss).hash_key;
851
852         netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
 853         /* The key is stored in the device as an array of u32, and the
 854          * API also expects the key in that format. Thus the number of
 855          * entries is the key size in bytes divided by sizeof(u32).
856          */
857         hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
858 }
859
860 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
861 {
862         return ena_dev->rss.hash_func;
863 }
864
865 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
866 {
867         struct ena_rss *rss = &ena_dev->rss;
868
869         rss->hash_key =
870                 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
871                                     &rss->hash_key_dma_addr, GFP_KERNEL);
872
873         if (unlikely(!rss->hash_key))
874                 return -ENOMEM;
875
876         return 0;
877 }
878
879 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
880 {
881         struct ena_rss *rss = &ena_dev->rss;
882
883         if (rss->hash_key)
884                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
885                                   rss->hash_key, rss->hash_key_dma_addr);
886         rss->hash_key = NULL;
887 }
888
889 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
890 {
891         struct ena_rss *rss = &ena_dev->rss;
892
893         rss->hash_ctrl =
894                 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
895                                     &rss->hash_ctrl_dma_addr, GFP_KERNEL);
896
897         if (unlikely(!rss->hash_ctrl))
898                 return -ENOMEM;
899
900         return 0;
901 }
902
903 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
904 {
905         struct ena_rss *rss = &ena_dev->rss;
906
907         if (rss->hash_ctrl)
908                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
909                                   rss->hash_ctrl, rss->hash_ctrl_dma_addr);
910         rss->hash_ctrl = NULL;
911 }
912
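/* Allocate the RSS indirection table: query the device for the supported
 * table size range, validate the requested log size against it, then
 * allocate both the DMA-able device table and the host shadow table.
 */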
913 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
914                                            u16 log_size)
915 {
916         struct ena_rss *rss = &ena_dev->rss;
917         struct ena_admin_get_feat_resp get_resp;
918         size_t tbl_size;
919         int ret;
920
921         ret = ena_com_get_feature(ena_dev, &get_resp,
922                                   ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
923         if (unlikely(ret))
924                 return ret;
925
926         if ((get_resp.u.ind_table.min_size > log_size) ||
927             (get_resp.u.ind_table.max_size < log_size)) {
 928                 pr_err("indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
929                        1 << log_size, 1 << get_resp.u.ind_table.min_size,
930                        1 << get_resp.u.ind_table.max_size);
931                 return -EINVAL;
932         }
933
934         tbl_size = (1ULL << log_size) *
935                 sizeof(struct ena_admin_rss_ind_table_entry);
936
937         rss->rss_ind_tbl =
938                 dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
939                                     &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
940         if (unlikely(!rss->rss_ind_tbl))
941                 goto mem_err1;
942
943         tbl_size = (1ULL << log_size) * sizeof(u16);
944         rss->host_rss_ind_tbl =
945                 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
946         if (unlikely(!rss->host_rss_ind_tbl))
947                 goto mem_err2;
948
949         rss->tbl_log_size = log_size;
950
951         return 0;
952
953 mem_err2:
954         tbl_size = (1ULL << log_size) *
955                 sizeof(struct ena_admin_rss_ind_table_entry);
956
957         dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
958                           rss->rss_ind_tbl_dma_addr);
959         rss->rss_ind_tbl = NULL;
960 mem_err1:
961         rss->tbl_log_size = 0;
962         return -ENOMEM;
963 }
964
965 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
966 {
967         struct ena_rss *rss = &ena_dev->rss;
968         size_t tbl_size = (1ULL << rss->tbl_log_size) *
969                 sizeof(struct ena_admin_rss_ind_table_entry);
970
971         if (rss->rss_ind_tbl)
972                 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
973                                   rss->rss_ind_tbl_dma_addr);
974         rss->rss_ind_tbl = NULL;
975
976         if (rss->host_rss_ind_tbl)
977                 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
978         rss->host_rss_ind_tbl = NULL;
979 }
980
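/* Issue an ADMIN_CREATE_SQ command for an IO submission queue, describing
 * its direction, placement policy, completion policy, depth and (for host
 * placement) base address. On success, record the SQ index and doorbell
 * address, plus the LLQ header/descriptor offsets for device placement.
 */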
981 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
982                                 struct ena_com_io_sq *io_sq, u16 cq_idx)
983 {
984         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
985         struct ena_admin_aq_create_sq_cmd create_cmd;
986         struct ena_admin_acq_create_sq_resp_desc cmd_completion;
987         u8 direction;
988         int ret;
989
990         memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_sq_cmd));
991
992         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
993
994         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
995                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
996         else
997                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
998
999         create_cmd.sq_identity |= (direction <<
1000                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1001                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1002
1003         create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1004                 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1005
1006         create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1007                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1008                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1009
1010         create_cmd.sq_caps_3 |=
1011                 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1012
1013         create_cmd.cq_idx = cq_idx;
1014         create_cmd.sq_depth = io_sq->q_depth;
1015
1016         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1017                 ret = ena_com_mem_addr_set(ena_dev,
1018                                            &create_cmd.sq_ba,
1019                                            io_sq->desc_addr.phys_addr);
1020                 if (unlikely(ret)) {
1021                         pr_err("memory address set failed\n");
1022                         return ret;
1023                 }
1024         }
1025
1026         ret = ena_com_execute_admin_command(admin_queue,
1027                                             (struct ena_admin_aq_entry *)&create_cmd,
1028                                             sizeof(create_cmd),
1029                                             (struct ena_admin_acq_entry *)&cmd_completion,
1030                                             sizeof(cmd_completion));
1031         if (unlikely(ret)) {
1032                 pr_err("Failed to create IO SQ. error: %d\n", ret);
1033                 return ret;
1034         }
1035
1036         io_sq->idx = cmd_completion.sq_idx;
1037
1038         io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1039                 (uintptr_t)cmd_completion.sq_doorbell_offset);
1040
1041         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1042                 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1043                                 + cmd_completion.llq_headers_offset);
1044
1045                 io_sq->desc_addr.pbuf_dev_addr =
1046                         (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1047                         cmd_completion.llq_descriptors_offset);
1048         }
1049
1050         pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1051
1052         return ret;
1053 }
1054
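/* Translate the host RSS indirection table, which holds queue ids, into
 * the device table, which holds the corresponding IO SQ indices, rejecting
 * entries that do not refer to a valid Rx queue.
 */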
1055 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1056 {
1057         struct ena_rss *rss = &ena_dev->rss;
1058         struct ena_com_io_sq *io_sq;
1059         u16 qid;
1060         int i;
1061
1062         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1063                 qid = rss->host_rss_ind_tbl[i];
1064                 if (qid >= ENA_TOTAL_NUM_QUEUES)
1065                         return -EINVAL;
1066
1067                 io_sq = &ena_dev->io_sq_queues[qid];
1068
1069                 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1070                         return -EINVAL;
1071
1072                 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1073         }
1074
1075         return 0;
1076 }
1077
1078 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1079 {
1080         u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1081         struct ena_rss *rss = &ena_dev->rss;
1082         u8 idx;
1083         u16 i;
1084
1085         for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1086                 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1087
1088         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1089                 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1090                         return -EINVAL;
1091                 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1092
1093                 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1094                         return -EINVAL;
1095
1096                 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1097         }
1098
1099         return 0;
1100 }
1101
1102 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1103 {
1104         size_t size;
1105
1106         size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1107
1108         ena_dev->intr_moder_tbl =
1109                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
1110         if (!ena_dev->intr_moder_tbl)
1111                 return -ENOMEM;
1112
1113         ena_com_config_default_interrupt_moderation_table(ena_dev);
1114
1115         return 0;
1116 }
1117
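/* Scale the interrupt moderation intervals by the interrupt delay
 * resolution reported by the device (falling back to 1 if the reported
 * value is zero), for both the Rx table and the Tx interval.
 */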
1118 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1119                                                  u16 intr_delay_resolution)
1120 {
1121         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1122         unsigned int i;
1123
1124         if (!intr_delay_resolution) {
1125                 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1126                 intr_delay_resolution = 1;
1127         }
1128         ena_dev->intr_delay_resolution = intr_delay_resolution;
1129
1130         /* update Rx */
1131         for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1132                 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1133
1134         /* update Tx */
1135         ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1136 }
1137
1138 /*****************************************************************************/
1139 /*******************************      API       ******************************/
1140 /*****************************************************************************/
1141
1142 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1143                                   struct ena_admin_aq_entry *cmd,
1144                                   size_t cmd_size,
1145                                   struct ena_admin_acq_entry *comp,
1146                                   size_t comp_size)
1147 {
1148         struct ena_comp_ctx *comp_ctx;
1149         int ret;
1150
1151         comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1152                                             comp, comp_size);
1153         if (unlikely(IS_ERR(comp_ctx))) {
1154                 pr_err("Failed to submit command [%ld]\n", PTR_ERR(comp_ctx));
1155                 return PTR_ERR(comp_ctx);
1156         }
1157
1158         ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1159         if (unlikely(ret)) {
1160                 if (admin_queue->running_state)
1161                         pr_err("Failed to process command. ret = %d\n", ret);
1162                 else
1163                         pr_debug("Failed to process command. ret = %d\n", ret);
1164         }
1165         return ret;
1166 }
1167
1168 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1169                          struct ena_com_io_cq *io_cq)
1170 {
1171         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1172         struct ena_admin_aq_create_cq_cmd create_cmd;
1173         struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1174         int ret;
1175
1176         memset(&create_cmd, 0x0, sizeof(struct ena_admin_aq_create_cq_cmd));
1177
1178         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1179
1180         create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1181                 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1182         create_cmd.cq_caps_1 |=
1183                 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1184
1185         create_cmd.msix_vector = io_cq->msix_vector;
1186         create_cmd.cq_depth = io_cq->q_depth;
1187
1188         ret = ena_com_mem_addr_set(ena_dev,
1189                                    &create_cmd.cq_ba,
1190                                    io_cq->cdesc_addr.phys_addr);
1191         if (unlikely(ret)) {
1192                 pr_err("memory address set failed\n");
1193                 return ret;
1194         }
1195
1196         ret = ena_com_execute_admin_command(admin_queue,
1197                                             (struct ena_admin_aq_entry *)&create_cmd,
1198                                             sizeof(create_cmd),
1199                                             (struct ena_admin_acq_entry *)&cmd_completion,
1200                                             sizeof(cmd_completion));
1201         if (unlikely(ret)) {
1202                 pr_err("Failed to create IO CQ. error: %d\n", ret);
1203                 return ret;
1204         }
1205
1206         io_cq->idx = cmd_completion.cq_idx;
1207
1208         io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1209                 cmd_completion.cq_interrupt_unmask_register_offset);
1210
1211         if (cmd_completion.cq_head_db_register_offset)
1212                 io_cq->cq_head_db_reg =
1213                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1214                         cmd_completion.cq_head_db_register_offset);
1215
1216         if (cmd_completion.numa_node_register_offset)
1217                 io_cq->numa_node_cfg_reg =
1218                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1219                         cmd_completion.numa_node_register_offset);
1220
1221         pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1222
1223         return ret;
1224 }
1225
1226 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1227                             struct ena_com_io_sq **io_sq,
1228                             struct ena_com_io_cq **io_cq)
1229 {
1230         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1231                 pr_err("Invalid queue number %d but the max is %d\n", qid,
1232                        ENA_TOTAL_NUM_QUEUES);
1233                 return -EINVAL;
1234         }
1235
1236         *io_sq = &ena_dev->io_sq_queues[qid];
1237         *io_cq = &ena_dev->io_cq_queues[qid];
1238
1239         return 0;
1240 }
1241
1242 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1243 {
1244         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1245         struct ena_comp_ctx *comp_ctx;
1246         u16 i;
1247
1248         if (!admin_queue->comp_ctx)
1249                 return;
1250
1251         for (i = 0; i < admin_queue->q_depth; i++) {
1252                 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1253                 if (unlikely(!comp_ctx))
1254                         break;
1255
1256                 comp_ctx->status = ENA_CMD_ABORTED;
1257
1258                 complete(&comp_ctx->wait_event);
1259         }
1260 }
1261
1262 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1263 {
1264         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1265         unsigned long flags;
1266
1267         spin_lock_irqsave(&admin_queue->q_lock, flags);
1268         while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1269                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1270                 msleep(20);
1271                 spin_lock_irqsave(&admin_queue->q_lock, flags);
1272         }
1273         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1274 }
1275
1276 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1277                           struct ena_com_io_cq *io_cq)
1278 {
1279         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1280         struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1281         struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1282         int ret;
1283
 1284         memset(&destroy_cmd, 0x0, sizeof(struct ena_admin_aq_destroy_cq_cmd));
1285
1286         destroy_cmd.cq_idx = io_cq->idx;
1287         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1288
1289         ret = ena_com_execute_admin_command(admin_queue,
1290                                             (struct ena_admin_aq_entry *)&destroy_cmd,
1291                                             sizeof(destroy_cmd),
1292                                             (struct ena_admin_acq_entry *)&destroy_resp,
1293                                             sizeof(destroy_resp));
1294
1295         if (unlikely(ret && (ret != -ENODEV)))
1296                 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1297
1298         return ret;
1299 }
1300
1301 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1302 {
1303         return ena_dev->admin_queue.running_state;
1304 }
1305
1306 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1307 {
1308         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1309         unsigned long flags;
1310
1311         spin_lock_irqsave(&admin_queue->q_lock, flags);
1312         ena_dev->admin_queue.running_state = state;
1313         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1314 }
1315
1316 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1317 {
1318         u16 depth = ena_dev->aenq.q_depth;
1319
1320         WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1321
1322         /* Init head_db to mark that all entries in the queue
1323          * are initially available
1324          */
1325         writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1326 }
1327
1328 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1329 {
1330         struct ena_com_admin_queue *admin_queue;
1331         struct ena_admin_set_feat_cmd cmd;
1332         struct ena_admin_set_feat_resp resp;
1333         struct ena_admin_get_feat_resp get_resp;
1334         int ret;
1335
1336         ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
1337         if (ret) {
1338                 pr_info("Can't get aenq configuration\n");
1339                 return ret;
1340         }
1341
1342         if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1343                 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
1344                         get_resp.u.aenq.supported_groups, groups_flag);
1345                 return -EPERM;
1346         }
1347
1348         memset(&cmd, 0x0, sizeof(cmd));
1349         admin_queue = &ena_dev->admin_queue;
1350
1351         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1352         cmd.aq_common_descriptor.flags = 0;
1353         cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1354         cmd.u.aenq.enabled_groups = groups_flag;
1355
1356         ret = ena_com_execute_admin_command(admin_queue,
1357                                             (struct ena_admin_aq_entry *)&cmd,
1358                                             sizeof(cmd),
1359                                             (struct ena_admin_acq_entry *)&resp,
1360                                             sizeof(resp));
1361
1362         if (unlikely(ret))
1363                 pr_err("Failed to config AENQ ret: %d\n", ret);
1364
1365         return ret;
1366 }
1367
1368 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1369 {
1370         u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1371         int width;
1372
1373         if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1374                 pr_err("Reg read timeout occurred\n");
1375                 return -ETIME;
1376         }
1377
1378         width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1379                 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1380
1381         pr_debug("ENA dma width: %d\n", width);
1382
1383         if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1384                 pr_err("DMA width illegal value: %d\n", width);
1385                 return -EINVAL;
1386         }
1387
1388         ena_dev->dma_addr_bits = width;
1389
1390         return width;
1391 }
1392
1393 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1394 {
1395         u32 ver;
1396         u32 ctrl_ver;
1397         u32 ctrl_ver_masked;
1398
1399         /* Make sure the ENA version and the controller version are at least
 1400          * as high as the driver expects
1401          */
1402         ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1403         ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1404                                           ENA_REGS_CONTROLLER_VERSION_OFF);
1405
1406         if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1407                      (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1408                 pr_err("Reg read timeout occurred\n");
1409                 return -ETIME;
1410         }
1411
1412         pr_info("ena device version: %d.%d\n",
1413                 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1414                         ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1415                 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1416
1417         if (ver < MIN_ENA_VER) {
1418                 pr_err("ENA version is lower than the minimal version the driver supports\n");
1419                 return -1;
1420         }
1421
1422         pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1423                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1424                         ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1425                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1426                         ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1427                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1428                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1429                         ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1430
1431         ctrl_ver_masked =
1432                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1433                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1434                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1435
1436         /* Validate the ctrl version without the implementation ID */
1437         if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1438                 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1439                 return -1;
1440         }
1441
1442         return 0;
1443 }
1444
1445 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1446 {
1447         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1448         struct ena_com_admin_cq *cq = &admin_queue->cq;
1449         struct ena_com_admin_sq *sq = &admin_queue->sq;
1450         struct ena_com_aenq *aenq = &ena_dev->aenq;
1451         u16 size;
1452
1453         if (admin_queue->comp_ctx)
1454                 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1455         admin_queue->comp_ctx = NULL;
1456         size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1457         if (sq->entries)
1458                 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1459                                   sq->dma_addr);
1460         sq->entries = NULL;
1461
1462         size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1463         if (cq->entries)
1464                 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1465                                   cq->dma_addr);
1466         cq->entries = NULL;
1467
1468         size = ADMIN_AENQ_SIZE(aenq->q_depth);
1469         if (ena_dev->aenq.entries)
1470                 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1471                                   aenq->dma_addr);
1472         aenq->entries = NULL;
1473 }
1474
1475 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1476 {
1477         u32 mask_value = 0;
1478
1479         if (polling)
1480                 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1481
1482         writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1483         ena_dev->admin_queue.polling = polling;
1484 }
1485
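/* Example (illustrative sketch): the admin queue is typically kept in polling
 * mode until the management MSI-X vector is requested, then switched to
 * interrupt-driven completions. The request_irq() step is an assumption about
 * the caller, not something this file performs.
 *
 *	ena_com_set_admin_polling_mode(ena_dev, true);
 *
 *	rc = request_irq(irq, mgmnt_intr_handler, 0, "ena-mgmnt", adapter);
 *	if (!rc)
 *		ena_com_set_admin_polling_mode(ena_dev, false);
 */
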
1486 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1487 {
1488         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1489
1490         spin_lock_init(&mmio_read->lock);
1491         mmio_read->read_resp =
1492                 dma_zalloc_coherent(ena_dev->dmadev,
1493                                     sizeof(*mmio_read->read_resp),
1494                                     &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1495         if (unlikely(!mmio_read->read_resp))
1496                 return -ENOMEM;
1497
1498         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1499
1500         mmio_read->read_resp->req_id = 0x0;
1501         mmio_read->seq_num = 0x0;
1502         mmio_read->readless_supported = true;
1503
1504         return 0;
1505 }
1506
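/* Example (illustrative sketch): the MMIO read request machinery must be set
 * up before the first readless register read issued through
 * ena_com_reg_bar_read32(). A caller that prefers plain readl()-based access
 * can opt out right after init:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	ena_com_set_mmio_read_mode(ena_dev, false);
 */
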
1507 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1508 {
1509         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1510
1511         mmio_read->readless_supported = readless_supported;
1512 }
1513
1514 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1515 {
1516         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1517
1518         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1519         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1520
1521         dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1522                           mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1523
1524         mmio_read->read_resp = NULL;
1525 }
1526
1527 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1528 {
1529         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1530         u32 addr_low, addr_high;
1531
1532         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1533         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1534
1535         writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1536         writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1537 }
1538
1539 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1540                        struct ena_aenq_handlers *aenq_handlers,
1541                        bool init_spinlock)
1542 {
1543         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1544         u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1545         int ret;
1546
1547         dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1548
1549         if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1550                 pr_err("Reg read timeout occurred\n");
1551                 return -ETIME;
1552         }
1553
1554         if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1555                 pr_err("Device isn't ready, abort com init\n");
1556                 return -ENODEV;
1557         }
1558
1559         admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1560
1561         admin_queue->q_dmadev = ena_dev->dmadev;
1562         admin_queue->polling = false;
1563         admin_queue->curr_cmd_id = 0;
1564
1565         atomic_set(&admin_queue->outstanding_cmds, 0);
1566
1567         if (init_spinlock)
1568                 spin_lock_init(&admin_queue->q_lock);
1569
1570         ret = ena_com_init_comp_ctxt(admin_queue);
1571         if (ret)
1572                 goto error;
1573
1574         ret = ena_com_admin_init_sq(admin_queue);
1575         if (ret)
1576                 goto error;
1577
1578         ret = ena_com_admin_init_cq(admin_queue);
1579         if (ret)
1580                 goto error;
1581
1582         admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1583                 ENA_REGS_AQ_DB_OFF);
1584
1585         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1586         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1587
1588         writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1589         writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1590
1591         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1592         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1593
1594         writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1595         writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1596
1597         aq_caps = 0;
1598         aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1599         aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1600                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1601                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1602
1603         acq_caps = 0;
1604         acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1605         acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1606                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1607                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1608
1609         writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1610         writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1611         ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1612         if (ret)
1613                 goto error;
1614
1615         admin_queue->running_state = true;
1616
1617         return 0;
1618 error:
1619         ena_com_admin_destroy(ena_dev);
1620
1621         return ret;
1622 }
1623
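/* Example (illustrative sketch): typical bring-up order as seen from a probe
 * routine; the aenq_handlers table and the error labels are assumptions of
 * the sketch, only the ena_com_*() calls come from this file.
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_validate_version(ena_dev);
 *	if (rc)
 *		goto err_mmio_read;
 *
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	if (rc)
 *		goto err_mmio_read;
 *
 * Until MSI-X is configured the admin queue can be driven synchronously via
 * ena_com_set_admin_polling_mode(ena_dev, true).
 */
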
1624 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1625                             struct ena_com_create_io_ctx *ctx)
1626 {
1627         struct ena_com_io_sq *io_sq;
1628         struct ena_com_io_cq *io_cq;
1629         int ret;
1630
1631         if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1632                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1633                        ctx->qid, ENA_TOTAL_NUM_QUEUES);
1634                 return -EINVAL;
1635         }
1636
1637         io_sq = &ena_dev->io_sq_queues[ctx->qid];
1638         io_cq = &ena_dev->io_cq_queues[ctx->qid];
1639
1640         memset(io_sq, 0x0, sizeof(struct ena_com_io_sq));
1641         memset(io_cq, 0x0, sizeof(struct ena_com_io_cq));
1642
1643         /* Init CQ */
1644         io_cq->q_depth = ctx->queue_size;
1645         io_cq->direction = ctx->direction;
1646         io_cq->qid = ctx->qid;
1647
1648         io_cq->msix_vector = ctx->msix_vector;
1649
1650         io_sq->q_depth = ctx->queue_size;
1651         io_sq->direction = ctx->direction;
1652         io_sq->qid = ctx->qid;
1653
1654         io_sq->mem_queue_type = ctx->mem_queue_type;
1655
1656         if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1657                 /* header length is limited to 8 bits */
1658                 io_sq->tx_max_header_size =
1659                         min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1660
1661         ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1662         if (ret)
1663                 goto error;
1664         ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1665         if (ret)
1666                 goto error;
1667
1668         ret = ena_com_create_io_cq(ena_dev, io_cq);
1669         if (ret)
1670                 goto error;
1671
1672         ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1673         if (ret)
1674                 goto destroy_io_cq;
1675
1676         return 0;
1677
1678 destroy_io_cq:
1679         ena_com_destroy_io_cq(ena_dev, io_cq);
1680 error:
1681         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1682         return ret;
1683 }
1684
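/* Example (illustrative sketch): filling a create-context for a TX queue. The
 * qid/msix_vector variables, the depth of 1024 and the use of
 * ENA_ADMIN_PLACEMENT_POLICY_HOST are placeholders chosen for illustration.
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= qid,
 *		.queue_size	= 1024,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= msix_vector,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (rc)
 *		return rc;
 */
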
1685 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1686 {
1687         struct ena_com_io_sq *io_sq;
1688         struct ena_com_io_cq *io_cq;
1689
1690         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1691                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1692                        ENA_TOTAL_NUM_QUEUES);
1693                 return;
1694         }
1695
1696         io_sq = &ena_dev->io_sq_queues[qid];
1697         io_cq = &ena_dev->io_cq_queues[qid];
1698
1699         ena_com_destroy_io_sq(ena_dev, io_sq);
1700         ena_com_destroy_io_cq(ena_dev, io_cq);
1701
1702         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1703 }
1704
1705 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1706                             struct ena_admin_get_feat_resp *resp)
1707 {
1708         return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1709 }
1710
1711 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1712                               struct ena_com_dev_get_features_ctx *get_feat_ctx)
1713 {
1714         struct ena_admin_get_feat_resp get_resp;
1715         int rc;
1716
1717         rc = ena_com_get_feature(ena_dev, &get_resp,
1718                                  ENA_ADMIN_DEVICE_ATTRIBUTES);
1719         if (rc)
1720                 return rc;
1721
1722         memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1723                sizeof(get_resp.u.dev_attr));
1724         ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1725
1726         rc = ena_com_get_feature(ena_dev, &get_resp,
1727                                  ENA_ADMIN_MAX_QUEUES_NUM);
1728         if (rc)
1729                 return rc;
1730
1731         memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1732                sizeof(get_resp.u.max_queue));
1733         ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1734
1735         rc = ena_com_get_feature(ena_dev, &get_resp,
1736                                  ENA_ADMIN_AENQ_CONFIG);
1737         if (rc)
1738                 return rc;
1739
1740         memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1741                sizeof(get_resp.u.aenq));
1742
1743         rc = ena_com_get_feature(ena_dev, &get_resp,
1744                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1745         if (rc)
1746                 return rc;
1747
1748         memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1749                sizeof(get_resp.u.offload));
1750
1751         return 0;
1752 }
1753
1754 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1755 {
1756         ena_com_handle_admin_completion(&ena_dev->admin_queue);
1757 }
1758
1759 /* ena_com_get_specific_aenq_cb:
1760  * return the handler that is relevant to the specific event group
1761  */
1762 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1763                                                      u16 group)
1764 {
1765         struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1766
1767         if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1768                 return aenq_handlers->handlers[group];
1769
1770         return aenq_handlers->unimplemented_handler;
1771 }
1772
1773 /* ena_com_aenq_intr_handler:
1774  * handles the incoming AENQ events.
1775  * pop events from the queue and apply the relevant handler to each one
1776  */
1777 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1778 {
1779         struct ena_admin_aenq_entry *aenq_e;
1780         struct ena_admin_aenq_common_desc *aenq_common;
1781         struct ena_com_aenq *aenq  = &dev->aenq;
1782         ena_aenq_handler handler_cb;
1783         u16 masked_head, processed = 0;
1784         u8 phase;
1785
1786         masked_head = aenq->head & (aenq->q_depth - 1);
1787         phase = aenq->phase;
1788         aenq_e = &aenq->entries[masked_head]; /* Get first entry */
1789         aenq_common = &aenq_e->aenq_common_desc;
1790
1791         /* Go over all the events */
1792         while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
1793                phase) {
1794                 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1795                          aenq_common->group, aenq_common->syndrom,
1796                          (u64)aenq_common->timestamp_low +
1797                                  ((u64)aenq_common->timestamp_high << 32));
1798
1799                 /* Handle specific event */
1800                 handler_cb = ena_com_get_specific_aenq_cb(dev,
1801                                                           aenq_common->group);
1802                 handler_cb(data, aenq_e); /* call the actual event handler */
1803
1804                 /* Get next event entry */
1805                 masked_head++;
1806                 processed++;
1807
1808                 if (unlikely(masked_head == aenq->q_depth)) {
1809                         masked_head = 0;
1810                         phase = !phase;
1811                 }
1812                 aenq_e = &aenq->entries[masked_head];
1813                 aenq_common = &aenq_e->aenq_common_desc;
1814         }
1815
1816         aenq->head += processed;
1817         aenq->phase = phase;
1818
1819         /* Don't update aenq doorbell if there weren't any processed events */
1820         if (!processed)
1821                 return;
1822
1823         /* write the aenq doorbell after all AENQ descriptors were read */
1824         mb();
1825         writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1826 }
1827
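/* Example (illustrative sketch): a handler table as a driver might register
 * it with ena_com_admin_init(). The callback names and the use of
 * ENA_ADMIN_LINK_CHANGE as an index are illustrative assumptions; the layout
 * (handlers[] plus unimplemented_handler) matches the lookup performed by
 * ena_com_get_specific_aenq_cb() above, and each callback has the
 * ena_aenq_handler signature, i.e.
 * void (*)(void *data, struct ena_admin_aenq_entry *aenq_e).
 *
 *	static struct ena_aenq_handlers aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = link_change_cb,
 *		},
 *		.unimplemented_handler = unimplemented_cb,
 *	};
 *
 * The interrupt handler of the management vector then calls
 * ena_com_aenq_intr_handler(ena_dev, adapter) to dispatch pending events.
 */
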
1828 int ena_com_dev_reset(struct ena_com_dev *ena_dev)
1829 {
1830         u32 stat, timeout, cap, reset_val;
1831         int rc;
1832
1833         stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1834         cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1835
1836         if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
1837                      (cap == ENA_MMIO_READ_TIMEOUT))) {
1838                 pr_err("Reg read32 timeout occurred\n");
1839                 return -ETIME;
1840         }
1841
1842         if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
1843                 pr_err("Device isn't ready, can't reset device\n");
1844                 return -EINVAL;
1845         }
1846
1847         timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
1848                         ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
1849         if (timeout == 0) {
1850                 pr_err("Invalid timeout value\n");
1851                 return -EINVAL;
1852         }
1853
1854         /* start reset */
1855         reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
1856         writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1857
1858         /* Write the MMIO read request address again */
1859         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1860
1861         rc = wait_for_reset_state(ena_dev, timeout,
1862                                   ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
1863         if (rc != 0) {
1864                 pr_err("Reset indication didn't turn on\n");
1865                 return rc;
1866         }
1867
1868         /* reset done */
1869         writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1870         rc = wait_for_reset_state(ena_dev, timeout, 0);
1871         if (rc != 0) {
1872                 pr_err("Reset indication didn't turn off\n");
1873                 return rc;
1874         }
1875
1876         return 0;
1877 }
1878
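/* Example (illustrative sketch): after a successful reset the device loses
 * its previous configuration, so a caller typically tears down and rebuilds
 * the admin queue around this call. The exact ordering below is an assumption
 * about the caller:
 *
 *	rc = ena_com_dev_reset(ena_dev);
 *	if (rc)
 *		return rc;
 *
 *	ena_com_admin_destroy(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 */
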
1879 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
1880                              struct ena_com_stats_ctx *ctx,
1881                              enum ena_admin_get_stats_type type)
1882 {
1883         struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
1884         struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
1885         struct ena_com_admin_queue *admin_queue;
1886         int ret;
1887
1888         admin_queue = &ena_dev->admin_queue;
1889
1890         get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
1891         get_cmd->aq_common_descriptor.flags = 0;
1892         get_cmd->type = type;
1893
1894         ret =  ena_com_execute_admin_command(admin_queue,
1895                                              (struct ena_admin_aq_entry *)get_cmd,
1896                                              sizeof(*get_cmd),
1897                                              (struct ena_admin_acq_entry *)get_resp,
1898                                              sizeof(*get_resp));
1899
1900         if (unlikely(ret))
1901                 pr_err("Failed to get stats. error: %d\n", ret);
1902
1903         return ret;
1904 }
1905
1906 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
1907                                 struct ena_admin_basic_stats *stats)
1908 {
1909         struct ena_com_stats_ctx ctx;
1910         int ret;
1911
1912         memset(&ctx, 0x0, sizeof(ctx));
1913         ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
1914         if (likely(ret == 0))
1915                 memcpy(stats, &ctx.get_resp.basic_stats,
1916                        sizeof(ctx.get_resp.basic_stats));
1917
1918         return ret;
1919 }
1920
1921 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
1922 {
1923         struct ena_com_admin_queue *admin_queue;
1924         struct ena_admin_set_feat_cmd cmd;
1925         struct ena_admin_set_feat_resp resp;
1926         int ret;
1927
1928         if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
1929                 pr_info("Feature %d isn't supported\n", ENA_ADMIN_MTU);
1930                 return -EPERM;
1931         }
1932
1933         memset(&cmd, 0x0, sizeof(cmd));
1934         admin_queue = &ena_dev->admin_queue;
1935
1936         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1937         cmd.aq_common_descriptor.flags = 0;
1938         cmd.feat_common.feature_id = ENA_ADMIN_MTU;
1939         cmd.u.mtu.mtu = mtu;
1940
1941         ret = ena_com_execute_admin_command(admin_queue,
1942                                             (struct ena_admin_aq_entry *)&cmd,
1943                                             sizeof(cmd),
1944                                             (struct ena_admin_acq_entry *)&resp,
1945                                             sizeof(resp));
1946
1947         if (unlikely(ret))
1948                 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
1949
1950         return ret;
1951 }
1952
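/* Example (illustrative sketch): wiring the admin command into the netdev
 * .ndo_change_mtu callback; the ena_adapter/netdev plumbing is an assumption
 * of the sketch.
 *
 *	static int ena_change_mtu(struct net_device *netdev, int new_mtu)
 *	{
 *		struct ena_adapter *adapter = netdev_priv(netdev);
 *		int rc;
 *
 *		rc = ena_com_set_dev_mtu(adapter->ena_dev, new_mtu);
 *		if (!rc)
 *			netdev->mtu = new_mtu;
 *
 *		return rc;
 *	}
 */
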
1953 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
1954                                  struct ena_admin_feature_offload_desc *offload)
1955 {
1956         int ret;
1957         struct ena_admin_get_feat_resp resp;
1958
1959         ret = ena_com_get_feature(ena_dev, &resp,
1960                                   ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1961         if (unlikely(ret)) {
1962                 pr_err("Failed to get offload capabilities %d\n", ret);
1963                 return ret;
1964         }
1965
1966         memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
1967
1968         return 0;
1969 }
1970
1971 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
1972 {
1973         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1974         struct ena_rss *rss = &ena_dev->rss;
1975         struct ena_admin_set_feat_cmd cmd;
1976         struct ena_admin_set_feat_resp resp;
1977         struct ena_admin_get_feat_resp get_resp;
1978         int ret;
1979
1980         if (!ena_com_check_supported_feature_id(ena_dev,
1981                                                 ENA_ADMIN_RSS_HASH_FUNCTION)) {
1982                 pr_info("Feature %d isn't supported\n",
1983                         ENA_ADMIN_RSS_HASH_FUNCTION);
1984                 return -EPERM;
1985         }
1986
1987         /* Validate hash function is supported */
1988         ret = ena_com_get_feature(ena_dev, &get_resp,
1989                                   ENA_ADMIN_RSS_HASH_FUNCTION);
1990         if (unlikely(ret))
1991                 return ret;
1992
1993         if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
1994                 pr_err("Func hash %d isn't supported by device, abort\n",
1995                        rss->hash_func);
1996                 return -EPERM;
1997         }
1998
1999         memset(&cmd, 0x0, sizeof(cmd));
2000
2001         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2002         cmd.aq_common_descriptor.flags =
2003                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2004         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2005         cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2006         cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2007
2008         ret = ena_com_mem_addr_set(ena_dev,
2009                                    &cmd.control_buffer.address,
2010                                    rss->hash_key_dma_addr);
2011         if (unlikely(ret)) {
2012                 pr_err("memory address set failed\n");
2013                 return ret;
2014         }
2015
2016         cmd.control_buffer.length = sizeof(*rss->hash_key);
2017
2018         ret = ena_com_execute_admin_command(admin_queue,
2019                                             (struct ena_admin_aq_entry *)&cmd,
2020                                             sizeof(cmd),
2021                                             (struct ena_admin_acq_entry *)&resp,
2022                                             sizeof(resp));
2023         if (unlikely(ret)) {
2024                 pr_err("Failed to set hash function %d. error: %d\n",
2025                        rss->hash_func, ret);
2026                 return -EINVAL;
2027         }
2028
2029         return 0;
2030 }
2031
2032 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2033                                enum ena_admin_hash_functions func,
2034                                const u8 *key, u16 key_len, u32 init_val)
2035 {
2036         struct ena_rss *rss = &ena_dev->rss;
2037         struct ena_admin_get_feat_resp get_resp;
2038         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2039                 rss->hash_key;
2040         int rc;
2041
2042         /* Make sure the key size is a multiple of DWORDs (4 bytes) */
2043         if (unlikely(key_len & 0x3))
2044                 return -EINVAL;
2045
2046         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2047                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2048                                     rss->hash_key_dma_addr,
2049                                     sizeof(*rss->hash_key));
2050         if (unlikely(rc))
2051                 return rc;
2052
2053         if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2054                 pr_err("Flow hash function %d isn't supported\n", func);
2055                 return -EPERM;
2056         }
2057
2058         switch (func) {
2059         case ENA_ADMIN_TOEPLITZ:
2060                 if (key) {
2061                         if (key_len != sizeof(hash_key->key)) {
2062                                 pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2063                                        key_len, sizeof(hash_key->key));
2064                                 return -EINVAL;
2065                         }
2066                         memcpy(hash_key->key, key, key_len);
2067                         rss->hash_init_val = init_val;
2068                         hash_key->keys_num = key_len >> 2;
2069                 }
2070                 break;
2071         case ENA_ADMIN_CRC32:
2072                 rss->hash_init_val = init_val;
2073                 break;
2074         default:
2075                 pr_err("Invalid hash function (%d)\n", func);
2076                 return -EINVAL;
2077         }
2078
2079         rss->hash_func = func;
2080         rc = ena_com_set_hash_function(ena_dev);
2081
2082         /* Restore the old function */
2083         if (unlikely(rc))
2084                 ena_com_get_hash_function(ena_dev, NULL, NULL);
2085
2086         return rc;
2087 }
2088
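/* Example (illustrative sketch): selecting Toeplitz hashing with a caller
 * supplied key. The 40-byte key length and the 0xFFFFFFFF init value are
 * assumptions for illustration; the function rejects a key length that is not
 * a multiple of 4 or that differs from sizeof(hash_key->key).
 *
 *	u8 rss_key[40];
 *
 *	netdev_rss_key_fill(rss_key, sizeof(rss_key));
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					rss_key, sizeof(rss_key), 0xFFFFFFFF);
 */
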
2089 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2090                               enum ena_admin_hash_functions *func,
2091                               u8 *key)
2092 {
2093         struct ena_rss *rss = &ena_dev->rss;
2094         struct ena_admin_get_feat_resp get_resp;
2095         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2096                 rss->hash_key;
2097         int rc;
2098
2099         if (unlikely(!func))
2100                 return -EINVAL;
2101
2102         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2103                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2104                                     rss->hash_key_dma_addr,
2105                                     sizeof(*rss->hash_key));
2106         if (unlikely(rc))
2107                 return rc;
2108
2109         /* ffs() returns 1 in case the lsb is set; decrement to zero-base it */
2110         rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2111         if (rss->hash_func)
2112                 rss->hash_func--;
2113
2114         *func = rss->hash_func;
2115
2116         if (key)
2117                 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2118
2119         return 0;
2120 }
2121
2122 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2123                           enum ena_admin_flow_hash_proto proto,
2124                           u16 *fields)
2125 {
2126         struct ena_rss *rss = &ena_dev->rss;
2127         struct ena_admin_get_feat_resp get_resp;
2128         int rc;
2129
2130         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2131                                     ENA_ADMIN_RSS_HASH_INPUT,
2132                                     rss->hash_ctrl_dma_addr,
2133                                     sizeof(*rss->hash_ctrl));
2134         if (unlikely(rc))
2135                 return rc;
2136
2137         if (fields)
2138                 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2139
2140         return 0;
2141 }
2142
2143 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2144 {
2145         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2146         struct ena_rss *rss = &ena_dev->rss;
2147         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2148         struct ena_admin_set_feat_cmd cmd;
2149         struct ena_admin_set_feat_resp resp;
2150         int ret;
2151
2152         if (!ena_com_check_supported_feature_id(ena_dev,
2153                                                 ENA_ADMIN_RSS_HASH_INPUT)) {
2154                 pr_info("Feature %d isn't supported\n", ENA_ADMIN_RSS_HASH_INPUT);
2155                 return -EPERM;
2156         }
2157
2158         memset(&cmd, 0x0, sizeof(cmd));
2159
2160         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2161         cmd.aq_common_descriptor.flags =
2162                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2163         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2164         cmd.u.flow_hash_input.enabled_input_sort =
2165                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2166                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2167
2168         ret = ena_com_mem_addr_set(ena_dev,
2169                                    &cmd.control_buffer.address,
2170                                    rss->hash_ctrl_dma_addr);
2171         if (unlikely(ret)) {
2172                 pr_err("memory address set failed\n");
2173                 return ret;
2174         }
2175         cmd.control_buffer.length = sizeof(*hash_ctrl);
2176
2177         ret = ena_com_execute_admin_command(admin_queue,
2178                                             (struct ena_admin_aq_entry *)&cmd,
2179                                             sizeof(cmd),
2180                                             (struct ena_admin_acq_entry *)&resp,
2181                                             sizeof(resp));
2182         if (unlikely(ret))
2183                 pr_err("Failed to set hash input. error: %d\n", ret);
2184
2185         return ret;
2186 }
2187
2188 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2189 {
2190         struct ena_rss *rss = &ena_dev->rss;
2191         struct ena_admin_feature_rss_hash_control *hash_ctrl =
2192                 rss->hash_ctrl;
2193         u16 available_fields = 0;
2194         int rc, i;
2195
2196         /* Get the supported hash input */
2197         rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2198         if (unlikely(rc))
2199                 return rc;
2200
2201         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2202                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2203                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2204
2205         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2206                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2207                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2208
2209         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2210                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2211                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2212
2213         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2214                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2215                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2216
2217         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2218                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2219
2220         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2221                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2222
2223         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2224                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2225
2226         hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2227                 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2228
2229         for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2230                 available_fields = hash_ctrl->selected_fields[i].fields &
2231                                 hash_ctrl->supported_fields[i].fields;
2232                 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2233                         pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2234                                i, hash_ctrl->supported_fields[i].fields,
2235                                hash_ctrl->selected_fields[i].fields);
2236                         return -EPERM;
2237                 }
2238         }
2239
2240         rc = ena_com_set_hash_ctrl(ena_dev);
2241
2242         /* In case of failure, restore the old hash ctrl */
2243         if (unlikely(rc))
2244                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2245
2246         return rc;
2247 }
2248
2249 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2250                            enum ena_admin_flow_hash_proto proto,
2251                            u16 hash_fields)
2252 {
2253         struct ena_rss *rss = &ena_dev->rss;
2254         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2255         u16 supported_fields;
2256         int rc;
2257
2258         if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2259                 pr_err("Invalid proto num (%u)\n", proto);
2260                 return -EINVAL;
2261         }
2262
2263         /* Get the ctrl table */
2264         rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2265         if (unlikely(rc))
2266                 return rc;
2267
2268         /* Make sure all the fields are supported */
2269         supported_fields = hash_ctrl->supported_fields[proto].fields;
2270         if ((hash_fields & supported_fields) != hash_fields) {
2271                 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2272                        proto, hash_fields, supported_fields);
2273         }
2274
2275         hash_ctrl->selected_fields[proto].fields = hash_fields;
2276
2277         rc = ena_com_set_hash_ctrl(ena_dev);
2278
2279         /* In case of failure, restore the old hash ctrl */
2280         if (unlikely(rc))
2281                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2282
2283         return rc;
2284 }
2285
2286 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2287                                       u16 entry_idx, u16 entry_value)
2288 {
2289         struct ena_rss *rss = &ena_dev->rss;
2290
2291         if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2292                 return -EINVAL;
2293
2294         if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2295                 return -EINVAL;
2296
2297         rss->host_rss_ind_tbl[entry_idx] = entry_value;
2298
2299         return 0;
2300 }
2301
2302 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2303 {
2304         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2305         struct ena_rss *rss = &ena_dev->rss;
2306         struct ena_admin_set_feat_cmd cmd;
2307         struct ena_admin_set_feat_resp resp;
2308         int ret;
2309
2310         if (!ena_com_check_supported_feature_id(
2311                     ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2312                 pr_info("Feature %d isn't supported\n",
2313                         ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2314                 return -EPERM;
2315         }
2316
2317         ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2318         if (ret) {
2319                 pr_err("Failed to convert host indirection table to device table\n");
2320                 return ret;
2321         }
2322
2323         memset(&cmd, 0x0, sizeof(cmd));
2324
2325         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2326         cmd.aq_common_descriptor.flags =
2327                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2328         cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2329         cmd.u.ind_table.size = rss->tbl_log_size;
2330         cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2331
2332         ret = ena_com_mem_addr_set(ena_dev,
2333                                    &cmd.control_buffer.address,
2334                                    rss->rss_ind_tbl_dma_addr);
2335         if (unlikely(ret)) {
2336                 pr_err("memory address set failed\n");
2337                 return ret;
2338         }
2339
2340         cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2341                 sizeof(struct ena_admin_rss_ind_table_entry);
2342
2343         ret = ena_com_execute_admin_command(admin_queue,
2344                                             (struct ena_admin_aq_entry *)&cmd,
2345                                             sizeof(cmd),
2346                                             (struct ena_admin_acq_entry *)&resp,
2347                                             sizeof(resp));
2348
2349         if (unlikely(ret))
2350                 pr_err("Failed to set indirect table. error: %d\n", ret);
2351
2352         return ret;
2353 }
2354
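/* Example (illustrative sketch): spreading num_queues RX queues across the
 * host indirection table and pushing the result to the device. tbl_size is
 * assumed to equal 1 << (the log size passed to ena_com_rss_init()), and the
 * simple i % num_queues mapping is only an illustration.
 *
 *	for (i = 0; i < tbl_size; i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (rc)
 *			return rc;
 *	}
 *
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
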
2355 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2356 {
2357         struct ena_rss *rss = &ena_dev->rss;
2358         struct ena_admin_get_feat_resp get_resp;
2359         u32 tbl_size;
2360         int i, rc;
2361
2362         tbl_size = (1ULL << rss->tbl_log_size) *
2363                 sizeof(struct ena_admin_rss_ind_table_entry);
2364
2365         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2366                                     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2367                                     rss->rss_ind_tbl_dma_addr,
2368                                     tbl_size);
2369         if (unlikely(rc))
2370                 return rc;
2371
2372         if (!ind_tbl)
2373                 return 0;
2374
2375         rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2376         if (unlikely(rc))
2377                 return rc;
2378
2379         for (i = 0; i < (1 << rss->tbl_log_size); i++)
2380                 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2381
2382         return 0;
2383 }
2384
2385 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2386 {
2387         int rc;
2388
2389         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2390
2391         rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2392         if (unlikely(rc))
2393                 goto err_indr_tbl;
2394
2395         rc = ena_com_hash_key_allocate(ena_dev);
2396         if (unlikely(rc))
2397                 goto err_hash_key;
2398
2399         ena_com_hash_key_fill_default_key(ena_dev);
2400
2401         rc = ena_com_hash_ctrl_init(ena_dev);
2402         if (unlikely(rc))
2403                 goto err_hash_ctrl;
2404
2405         return 0;
2406
2407 err_hash_ctrl:
2408         ena_com_hash_key_destroy(ena_dev);
2409 err_hash_key:
2410         ena_com_indirect_table_destroy(ena_dev);
2411 err_indr_tbl:
2412
2413         return rc;
2414 }
2415
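/* Example (illustrative sketch): a full RSS bring-up using the helpers above.
 * ENA_RX_RSS_TABLE_LOG_SIZE is a placeholder name for the indirection table
 * log size chosen by the caller; -EPERM is tolerated because it is what the
 * helpers return when the device doesn't support the feature.
 *
 *	rc = ena_com_rss_init(ena_dev, ENA_RX_RSS_TABLE_LOG_SIZE);
 *	if (rc)
 *		return rc;
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL, 0,
 *					0xFFFFFFFF);
 *	if (rc && (rc != -EPERM))
 *		goto err;
 *
 *	rc = ena_com_set_default_hash_ctrl(ena_dev);
 *	if (rc && (rc != -EPERM))
 *		goto err;
 *
 * followed by filling the indirection table and ena_com_indirect_table_set()
 * as sketched earlier.
 */
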
2416 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2417 {
2418         ena_com_indirect_table_destroy(ena_dev);
2419         ena_com_hash_key_destroy(ena_dev);
2420         ena_com_hash_ctrl_destroy(ena_dev);
2421
2422         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2423 }
2424
2425 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2426 {
2427         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2428
2429         host_attr->host_info =
2430                 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
2431                                     &host_attr->host_info_dma_addr, GFP_KERNEL);
2432         if (unlikely(!host_attr->host_info))
2433                 return -ENOMEM;
2434
2435         return 0;
2436 }
2437
2438 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2439                                 u32 debug_area_size)
2440 {
2441         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2442
2443         host_attr->debug_area_virt_addr =
2444                 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
2445                                     &host_attr->debug_area_dma_addr, GFP_KERNEL);
2446         if (unlikely(!host_attr->debug_area_virt_addr)) {
2447                 host_attr->debug_area_size = 0;
2448                 return -ENOMEM;
2449         }
2450
2451         host_attr->debug_area_size = debug_area_size;
2452
2453         return 0;
2454 }
2455
2456 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2457 {
2458         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2459
2460         if (host_attr->host_info) {
2461                 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2462                                   host_attr->host_info_dma_addr);
2463                 host_attr->host_info = NULL;
2464         }
2465 }
2466
2467 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2468 {
2469         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2470
2471         if (host_attr->debug_area_virt_addr) {
2472                 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2473                                   host_attr->debug_area_virt_addr,
2474                                   host_attr->debug_area_dma_addr);
2475                 host_attr->debug_area_virt_addr = NULL;
2476         }
2477 }
2478
2479 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2480 {
2481         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2482         struct ena_com_admin_queue *admin_queue;
2483         struct ena_admin_set_feat_cmd cmd;
2484         struct ena_admin_set_feat_resp resp;
2485
2486         int ret;
2487
2488         if (!ena_com_check_supported_feature_id(ena_dev,
2489                                                 ENA_ADMIN_HOST_ATTR_CONFIG)) {
2490                 pr_warn("Set host attribute isn't supported\n");
2491                 return -EPERM;
2492         }
2493
2494         memset(&cmd, 0x0, sizeof(cmd));
2495         admin_queue = &ena_dev->admin_queue;
2496
2497         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2498         cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2499
2500         ret = ena_com_mem_addr_set(ena_dev,
2501                                    &cmd.u.host_attr.debug_ba,
2502                                    host_attr->debug_area_dma_addr);
2503         if (unlikely(ret)) {
2504                 pr_err("memory address set failed\n");
2505                 return ret;
2506         }
2507
2508         ret = ena_com_mem_addr_set(ena_dev,
2509                                    &cmd.u.host_attr.os_info_ba,
2510                                    host_attr->host_info_dma_addr);
2511         if (unlikely(ret)) {
2512                 pr_err("memory address set failed\n");
2513                 return ret;
2514         }
2515
2516         cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2517
2518         ret = ena_com_execute_admin_command(admin_queue,
2519                                             (struct ena_admin_aq_entry *)&cmd,
2520                                             sizeof(cmd),
2521                                             (struct ena_admin_acq_entry *)&resp,
2522                                             sizeof(resp));
2523
2524         if (unlikely(ret))
2525                 pr_err("Failed to set host attributes: %d\n", ret);
2526
2527         return ret;
2528 }
2529
2530 /* Interrupt moderation */
2531 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2532 {
2533         return ena_com_check_supported_feature_id(ena_dev,
2534                                                   ENA_ADMIN_INTERRUPT_MODERATION);
2535 }
2536
2537 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2538                                                       u32 tx_coalesce_usecs)
2539 {
2540         if (!ena_dev->intr_delay_resolution) {
2541                 pr_err("Illegal interrupt delay granularity value\n");
2542                 return -EFAULT;
2543         }
2544
2545         ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2546                 ena_dev->intr_delay_resolution;
2547
2548         return 0;
2549 }
2550
2551 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2552                                                       u32 rx_coalesce_usecs)
2553 {
2554         if (!ena_dev->intr_delay_resolution) {
2555                 pr_err("Illegal interrupt delay granularity value\n");
2556                 return -EFAULT;
2557         }
2558
2559         /* We use LOWEST entry of moderation table for storing
2560          * nonadaptive interrupt coalescing values
2561          */
2562         ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2563                 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2564
2565         return 0;
2566 }
2567
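/* Example (worked, illustrative): if the device reports an interrupt delay
 * resolution of 2 usec per unit and ethtool requests rx-usecs = 64, the value
 * stored in the LOWEST table entry is 64 / 2 = 32 device units. A resolution
 * of 0 is rejected above because the conversion would be undefined.
 *
 *	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, 64);
 */
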
2568 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2569 {
2570         if (ena_dev->intr_moder_tbl)
2571                 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2572         ena_dev->intr_moder_tbl = NULL;
2573 }
2574
2575 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2576 {
2577         struct ena_admin_get_feat_resp get_resp;
2578         u16 delay_resolution;
2579         int rc;
2580
2581         rc = ena_com_get_feature(ena_dev, &get_resp,
2582                                  ENA_ADMIN_INTERRUPT_MODERATION);
2583
2584         if (rc) {
2585                 if (rc == -EPERM) {
2586                         pr_info("Feature %d isn't supported\n",
2587                                 ENA_ADMIN_INTERRUPT_MODERATION);
2588                         rc = 0;
2589                 } else {
2590                         pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2591                                rc);
2592                 }
2593
2594                 /* no moderation supported, disable adaptive support */
2595                 ena_com_disable_adaptive_moderation(ena_dev);
2596                 return rc;
2597         }
2598
2599         rc = ena_com_init_interrupt_moderation_table(ena_dev);
2600         if (rc)
2601                 goto err;
2602
2603         /* if moderation is supported by device we set adaptive moderation */
2604         delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2605         ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2606         ena_com_enable_adaptive_moderation(ena_dev);
2607
2608         return 0;
2609 err:
2610         ena_com_destroy_interrupt_moderation(ena_dev);
2611         return rc;
2612 }
2613
2614 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2615 {
2616         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2617
2618         if (!intr_moder_tbl)
2619                 return;
2620
2621         intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2622                 ENA_INTR_LOWEST_USECS;
2623         intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2624                 ENA_INTR_LOWEST_PKTS;
2625         intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2626                 ENA_INTR_LOWEST_BYTES;
2627
2628         intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2629                 ENA_INTR_LOW_USECS;
2630         intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2631                 ENA_INTR_LOW_PKTS;
2632         intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2633                 ENA_INTR_LOW_BYTES;
2634
2635         intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2636                 ENA_INTR_MID_USECS;
2637         intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2638                 ENA_INTR_MID_PKTS;
2639         intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2640                 ENA_INTR_MID_BYTES;
2641
2642         intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2643                 ENA_INTR_HIGH_USECS;
2644         intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2645                 ENA_INTR_HIGH_PKTS;
2646         intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2647                 ENA_INTR_HIGH_BYTES;
2648
2649         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2650                 ENA_INTR_HIGHEST_USECS;
2651         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2652                 ENA_INTR_HIGHEST_PKTS;
2653         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2654                 ENA_INTR_HIGHEST_BYTES;
2655 }
2656
2657 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2658 {
2659         return ena_dev->intr_moder_tx_interval;
2660 }
2661
2662 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2663 {
2664         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2665
2666         if (intr_moder_tbl)
2667                 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2668
2669         return 0;
2670 }
2671
2672 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2673                                         enum ena_intr_moder_level level,
2674                                         struct ena_intr_moder_entry *entry)
2675 {
2676         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2677
2678         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2679                 return;
2680
2681         intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2682         if (ena_dev->intr_delay_resolution)
2683                 intr_moder_tbl[level].intr_moder_interval /=
2684                         ena_dev->intr_delay_resolution;
2685         intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
2686
2687         /* use hardcoded value until ethtool supports bytecount parameter */
2688         if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
2689                 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
2690 }
2691
2692 void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2693                                        enum ena_intr_moder_level level,
2694                                        struct ena_intr_moder_entry *entry)
2695 {
2696         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2697
2698         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2699                 return;
2700
2701         entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2702         if (ena_dev->intr_delay_resolution)
2703                 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2704         entry->pkts_per_interval =
2705                 intr_moder_tbl[level].pkts_per_interval;
2706         entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2707 }