GNU Linux-libre 4.14.266-gnu1
[releases.git] / drivers / net / ethernet / amazon / ena / ena_com.c
1 /*
2  * Copyright 2015 Amazon.com, Inc. or its affiliates.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include "ena_com.h"
34
35 /*****************************************************************************/
36 /*****************************************************************************/
37
38 /* Timeout in micro-sec */
39 #define ADMIN_CMD_TIMEOUT_US (3000000)
40
41 #define ENA_ASYNC_QUEUE_DEPTH 16
42 #define ENA_ADMIN_QUEUE_DEPTH 32
43
44 #define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
45                 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
46                 | (ENA_COMMON_SPEC_VERSION_MINOR))
47
48 #define ENA_CTRL_MAJOR          0
49 #define ENA_CTRL_MINOR          0
50 #define ENA_CTRL_SUB_MINOR      1
51
52 #define MIN_ENA_CTRL_VER \
53         (((ENA_CTRL_MAJOR) << \
54         (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
55         ((ENA_CTRL_MINOR) << \
56         (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
57         (ENA_CTRL_SUB_MINOR))
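
/* Illustrative sketch (comment only, not part of the driver build): the
 * controller version register packs major/minor/sub-minor fields using the
 * ENA_REGS_CONTROLLER_VERSION_* shifts, and ena_com_validate_version() below
 * strips the implementation ID before comparing against MIN_ENA_CTRL_VER:
 *
 *     u32 masked = ctrl_ver & (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK |
 *                              ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK |
 *                              ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
 *     bool supported = (masked >= MIN_ENA_CTRL_VER);
 */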
58
59 #define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
60 #define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))
61
62 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
63
64 #define ENA_REGS_ADMIN_INTR_MASK 1
65
66 /*****************************************************************************/
67 /*****************************************************************************/
68 /*****************************************************************************/
69
70 enum ena_cmd_status {
71         ENA_CMD_SUBMITTED,
72         ENA_CMD_COMPLETED,
73         /* Abort - canceled by the driver */
74         ENA_CMD_ABORTED,
75 };
76
77 struct ena_comp_ctx {
78         struct completion wait_event;
79         struct ena_admin_acq_entry *user_cqe;
80         u32 comp_size;
81         enum ena_cmd_status status;
82         /* status from the device */
83         u8 comp_status;
84         u8 cmd_opcode;
85         bool occupied;
86 };
87
88 struct ena_com_stats_ctx {
89         struct ena_admin_aq_get_stats_cmd get_cmd;
90         struct ena_admin_acq_get_stats_resp get_resp;
91 };
92
93 static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
94                                        struct ena_common_mem_addr *ena_addr,
95                                        dma_addr_t addr)
96 {
97         if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
98                 pr_err("dma address has more bits than the device supports\n");
99                 return -EINVAL;
100         }
101
102         ena_addr->mem_addr_low = lower_32_bits(addr);
103         ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
104
105         return 0;
106 }
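
/* Usage sketch (comment only, hypothetical caller): a DMA handle obtained from
 * the usual mapping helpers is split by ena_com_mem_addr_set() into the 32-bit
 * low part and the 16-bit high part of struct ena_common_mem_addr, after
 * checking it fits within the advertised dma_addr_bits:
 *
 *     struct ena_common_mem_addr ba = {};
 *     int rc = ena_com_mem_addr_set(ena_dev, &ba, dma_handle);
 *
 *     if (rc)         // address uses more bits than the device supports
 *             return rc;
 *
 * "dma_handle" is a placeholder name, not something defined in this file.
 */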
107
108 static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
109 {
110         struct ena_com_admin_sq *sq = &queue->sq;
111         u16 size = ADMIN_SQ_SIZE(queue->q_depth);
112
113         sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
114                                           GFP_KERNEL);
115
116         if (!sq->entries) {
117                 pr_err("memory allocation failed\n");
118                 return -ENOMEM;
119         }
120
121         sq->head = 0;
122         sq->tail = 0;
123         sq->phase = 1;
124
125         sq->db_addr = NULL;
126
127         return 0;
128 }
129
130 static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
131 {
132         struct ena_com_admin_cq *cq = &queue->cq;
133         u16 size = ADMIN_CQ_SIZE(queue->q_depth);
134
135         cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
136                                           GFP_KERNEL);
137
138         if (!cq->entries) {
139                 pr_err("memory allocation failed\n");
140                 return -ENOMEM;
141         }
142
143         cq->head = 0;
144         cq->phase = 1;
145
146         return 0;
147 }
148
149 static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
150                                    struct ena_aenq_handlers *aenq_handlers)
151 {
152         struct ena_com_aenq *aenq = &dev->aenq;
153         u32 addr_low, addr_high, aenq_caps;
154         u16 size;
155
156         dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
157         size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
158         aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
159                                             GFP_KERNEL);
160
161         if (!aenq->entries) {
162                 pr_err("memory allocation failed\n");
163                 return -ENOMEM;
164         }
165
166         aenq->head = aenq->q_depth;
167         aenq->phase = 1;
168
169         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
170         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
171
172         writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
173         writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
174
175         aenq_caps = 0;
176         aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
177         aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
178                       << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
179                      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
180         writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
181
182         if (unlikely(!aenq_handlers)) {
183                 pr_err("aenq handlers pointer is NULL\n");
184                 return -EINVAL;
185         }
186
187         aenq->aenq_handlers = aenq_handlers;
188
189         return 0;
190 }
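
/* Note (descriptive): ena_com_admin_init_aenq() only programs the AENQ base
 * address and capability registers and leaves head == q_depth. The AENQ head
 * doorbell itself is written later by ena_com_admin_aenq_enable(), which marks
 * all entries as available to the device.
 */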
191
192 static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
193                                      struct ena_comp_ctx *comp_ctx)
194 {
195         comp_ctx->occupied = false;
196         atomic_dec(&queue->outstanding_cmds);
197 }
198
199 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
200                                           u16 command_id, bool capture)
201 {
202         if (unlikely(!queue->comp_ctx)) {
203                 pr_err("Completion context is NULL\n");
204                 return NULL;
205         }
206
207         if (unlikely(command_id >= queue->q_depth)) {
208                 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
209                        command_id, queue->q_depth);
210                 return NULL;
211         }
212
213         if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
214                 pr_err("Completion context is occupied\n");
215                 return NULL;
216         }
217
218         if (capture) {
219                 atomic_inc(&queue->outstanding_cmds);
220                 queue->comp_ctx[command_id].occupied = true;
221         }
222
223         return &queue->comp_ctx[command_id];
224 }
225
226 static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
227                                                        struct ena_admin_aq_entry *cmd,
228                                                        size_t cmd_size_in_bytes,
229                                                        struct ena_admin_acq_entry *comp,
230                                                        size_t comp_size_in_bytes)
231 {
232         struct ena_comp_ctx *comp_ctx;
233         u16 tail_masked, cmd_id;
234         u16 queue_size_mask;
235         u16 cnt;
236
237         queue_size_mask = admin_queue->q_depth - 1;
238
239         tail_masked = admin_queue->sq.tail & queue_size_mask;
240
241         /* In case of queue FULL */
242         cnt = atomic_read(&admin_queue->outstanding_cmds);
243         if (cnt >= admin_queue->q_depth) {
244                 pr_debug("admin queue is full.\n");
245                 admin_queue->stats.out_of_space++;
246                 return ERR_PTR(-ENOSPC);
247         }
248
249         cmd_id = admin_queue->curr_cmd_id;
250
251         cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
252                 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
253
254         cmd->aq_common_descriptor.command_id |= cmd_id &
255                 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
256
257         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
258         if (unlikely(!comp_ctx))
259                 return ERR_PTR(-EINVAL);
260
261         comp_ctx->status = ENA_CMD_SUBMITTED;
262         comp_ctx->comp_size = (u32)comp_size_in_bytes;
263         comp_ctx->user_cqe = comp;
264         comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
265
266         reinit_completion(&comp_ctx->wait_event);
267
268         memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
269
270         admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
271                 queue_size_mask;
272
273         admin_queue->sq.tail++;
274         admin_queue->stats.submitted_cmd++;
275
276         if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
277                 admin_queue->sq.phase = !admin_queue->sq.phase;
278
279         writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
280
281         return comp_ctx;
282 }
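
/* Illustrative sketch (comment only): the submission path above is a classic
 * power-of-two ring with a phase bit. With q_depth a power of two, the slot
 * selection and the phase flip reduce to:
 *
 *     u16 slot = sq->tail & (q_depth - 1);   // entry that receives the command
 *
 *     memcpy(&sq->entries[slot], cmd, cmd_size);
 *     sq->tail++;
 *     if ((sq->tail & (q_depth - 1)) == 0)   // wrapped around
 *             sq->phase = !sq->phase;        // stale entries now differ by phase
 *
 * The device uses the phase bit in each descriptor to distinguish new entries
 * from ones left over from the previous lap.
 */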
283
284 static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
285 {
286         size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
287         struct ena_comp_ctx *comp_ctx;
288         u16 i;
289
290         queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
291         if (unlikely(!queue->comp_ctx)) {
292                 pr_err("memory allocation failed\n");
293                 return -ENOMEM;
294         }
295
296         for (i = 0; i < queue->q_depth; i++) {
297                 comp_ctx = get_comp_ctxt(queue, i, false);
298                 if (comp_ctx)
299                         init_completion(&comp_ctx->wait_event);
300         }
301
302         return 0;
303 }
304
305 static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
306                                                      struct ena_admin_aq_entry *cmd,
307                                                      size_t cmd_size_in_bytes,
308                                                      struct ena_admin_acq_entry *comp,
309                                                      size_t comp_size_in_bytes)
310 {
311         unsigned long flags;
312         struct ena_comp_ctx *comp_ctx;
313
314         spin_lock_irqsave(&admin_queue->q_lock, flags);
315         if (unlikely(!admin_queue->running_state)) {
316                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
317                 return ERR_PTR(-ENODEV);
318         }
319         comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
320                                               cmd_size_in_bytes,
321                                               comp,
322                                               comp_size_in_bytes);
323         if (unlikely(IS_ERR(comp_ctx)))
324                 admin_queue->running_state = false;
325         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
326
327         return comp_ctx;
328 }
329
330 static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
331                               struct ena_com_create_io_ctx *ctx,
332                               struct ena_com_io_sq *io_sq)
333 {
334         size_t size;
335         int dev_node = 0;
336
337         memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
338
339         io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
340         io_sq->desc_entry_size =
341                 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
342                 sizeof(struct ena_eth_io_tx_desc) :
343                 sizeof(struct ena_eth_io_rx_desc);
344
345         size = io_sq->desc_entry_size * io_sq->q_depth;
346
347         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
348                 dev_node = dev_to_node(ena_dev->dmadev);
349                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
350                 io_sq->desc_addr.virt_addr =
351                         dma_zalloc_coherent(ena_dev->dmadev, size,
352                                             &io_sq->desc_addr.phys_addr,
353                                             GFP_KERNEL);
354                 set_dev_node(ena_dev->dmadev, dev_node);
355                 if (!io_sq->desc_addr.virt_addr) {
356                         io_sq->desc_addr.virt_addr =
357                                 dma_zalloc_coherent(ena_dev->dmadev, size,
358                                                     &io_sq->desc_addr.phys_addr,
359                                                     GFP_KERNEL);
360                 }
361         } else {
362                 dev_node = dev_to_node(ena_dev->dmadev);
363                 set_dev_node(ena_dev->dmadev, ctx->numa_node);
364                 io_sq->desc_addr.virt_addr =
365                         devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
366                 set_dev_node(ena_dev->dmadev, dev_node);
367                 if (!io_sq->desc_addr.virt_addr) {
368                         io_sq->desc_addr.virt_addr =
369                                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
370                 }
371         }
372
373         if (!io_sq->desc_addr.virt_addr) {
374                 pr_err("memory allocation failed\n");
375                 return -ENOMEM;
376         }
377
378         io_sq->tail = 0;
379         io_sq->next_to_comp = 0;
380         io_sq->phase = 1;
381
382         return 0;
383 }
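
/* Note (descriptive): both branches above first try to allocate the descriptor
 * area on the NUMA node requested in ctx->numa_node (by temporarily switching
 * the device node with set_dev_node()), restore the original node, and only
 * then retry the same allocation without the NUMA preference if the first
 * attempt failed.
 */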
384
385 static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
386                               struct ena_com_create_io_ctx *ctx,
387                               struct ena_com_io_cq *io_cq)
388 {
389         size_t size;
390         int prev_node = 0;
391
392         memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
393
394         /* Use the basic completion descriptor for Rx */
395         io_cq->cdesc_entry_size_in_bytes =
396                 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
397                 sizeof(struct ena_eth_io_tx_cdesc) :
398                 sizeof(struct ena_eth_io_rx_cdesc_base);
399
400         size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
401
402         prev_node = dev_to_node(ena_dev->dmadev);
403         set_dev_node(ena_dev->dmadev, ctx->numa_node);
404         io_cq->cdesc_addr.virt_addr =
405                 dma_zalloc_coherent(ena_dev->dmadev, size,
406                                     &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
407         set_dev_node(ena_dev->dmadev, prev_node);
408         if (!io_cq->cdesc_addr.virt_addr) {
409                 io_cq->cdesc_addr.virt_addr =
410                         dma_zalloc_coherent(ena_dev->dmadev, size,
411                                             &io_cq->cdesc_addr.phys_addr,
412                                             GFP_KERNEL);
413         }
414
415         if (!io_cq->cdesc_addr.virt_addr) {
416                 pr_err("memory allocation failed\n");
417                 return -ENOMEM;
418         }
419
420         io_cq->phase = 1;
421         io_cq->head = 0;
422
423         return 0;
424 }
425
426 static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
427                                                    struct ena_admin_acq_entry *cqe)
428 {
429         struct ena_comp_ctx *comp_ctx;
430         u16 cmd_id;
431
432         cmd_id = cqe->acq_common_descriptor.command &
433                 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
434
435         comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
436         if (unlikely(!comp_ctx)) {
437                 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
438                 admin_queue->running_state = false;
439                 return;
440         }
441
442         comp_ctx->status = ENA_CMD_COMPLETED;
443         comp_ctx->comp_status = cqe->acq_common_descriptor.status;
444
445         if (comp_ctx->user_cqe)
446                 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
447
448         if (!admin_queue->polling)
449                 complete(&comp_ctx->wait_event);
450 }
451
452 static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
453 {
454         struct ena_admin_acq_entry *cqe = NULL;
455         u16 comp_num = 0;
456         u16 head_masked;
457         u8 phase;
458
459         head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
460         phase = admin_queue->cq.phase;
461
462         cqe = &admin_queue->cq.entries[head_masked];
463
464         /* Go over all the completions */
465         while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
466                         ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
467                 /* Do not read the rest of the completion entry before the
468                  * phase bit was validated
469                  */
470                 rmb();
471                 ena_com_handle_single_admin_completion(admin_queue, cqe);
472
473                 head_masked++;
474                 comp_num++;
475                 if (unlikely(head_masked == admin_queue->q_depth)) {
476                         head_masked = 0;
477                         phase = !phase;
478                 }
479
480                 cqe = &admin_queue->cq.entries[head_masked];
481         }
482
483         admin_queue->cq.head += comp_num;
484         admin_queue->cq.phase = phase;
485         admin_queue->sq.head += comp_num;
486         admin_queue->stats.completed_cmd += comp_num;
487 }
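
/* Note (descriptive): each ACQ completion corresponds to one admin SQ entry,
 * which is why the loop above advances both cq.head and sq.head by comp_num.
 * The rmb() prevents the CPU from reading the rest of a completion entry
 * before its phase bit has been validated.
 */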
488
489 static int ena_com_comp_status_to_errno(u8 comp_status)
490 {
491         if (unlikely(comp_status != 0))
492                 pr_err("admin command failed[%u]\n", comp_status);
493
494         if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
495                 return -EINVAL;
496
497         switch (comp_status) {
498         case ENA_ADMIN_SUCCESS:
499                 return 0;
500         case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
501                 return -ENOMEM;
502         case ENA_ADMIN_UNSUPPORTED_OPCODE:
503                 return -EOPNOTSUPP;
504         case ENA_ADMIN_BAD_OPCODE:
505         case ENA_ADMIN_MALFORMED_REQUEST:
506         case ENA_ADMIN_ILLEGAL_PARAMETER:
507         case ENA_ADMIN_UNKNOWN_ERROR:
508                 return -EINVAL;
509         }
510
511         return 0;
512 }
513
514 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
515                                                      struct ena_com_admin_queue *admin_queue)
516 {
517         unsigned long flags, timeout;
518         int ret;
519
520         timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
521
522         while (1) {
523                 spin_lock_irqsave(&admin_queue->q_lock, flags);
524                 ena_com_handle_admin_completion(admin_queue);
525                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
526
527                 if (comp_ctx->status != ENA_CMD_SUBMITTED)
528                         break;
529
530                 if (time_is_before_jiffies(timeout)) {
531                         pr_err("Wait for completion (polling) timeout\n");
532                         /* ENA didn't have any completion */
533                         spin_lock_irqsave(&admin_queue->q_lock, flags);
534                         admin_queue->stats.no_completion++;
535                         admin_queue->running_state = false;
536                         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
537
538                         ret = -ETIME;
539                         goto err;
540                 }
541
542                 msleep(100);
543         }
544
545         if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
546                 pr_err("Command was aborted\n");
547                 spin_lock_irqsave(&admin_queue->q_lock, flags);
548                 admin_queue->stats.aborted_cmd++;
549                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
550                 ret = -ENODEV;
551                 goto err;
552         }
553
554         WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
555              comp_ctx->status);
556
557         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
558 err:
559         comp_ctxt_release(admin_queue, comp_ctx);
560         return ret;
561 }
562
563 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
564                                                         struct ena_com_admin_queue *admin_queue)
565 {
566         unsigned long flags;
567         int ret;
568
569         wait_for_completion_timeout(&comp_ctx->wait_event,
570                                     usecs_to_jiffies(
571                                             admin_queue->completion_timeout));
572
573         /* In case the command wasn't completed, find out the root cause.
574          * There might be 2 kinds of errors:
575          * 1) No completion (timeout reached)
576          * 2) There is a completion but the driver didn't receive any MSI-X interrupt.
577          */
578         if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
579                 spin_lock_irqsave(&admin_queue->q_lock, flags);
580                 ena_com_handle_admin_completion(admin_queue);
581                 admin_queue->stats.no_completion++;
582                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
583
584                 if (comp_ctx->status == ENA_CMD_COMPLETED)
585                         pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
586                                comp_ctx->cmd_opcode);
587                 else
588                         pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
589                                comp_ctx->cmd_opcode, comp_ctx->status);
590
591                 admin_queue->running_state = false;
592                 ret = -ETIME;
593                 goto err;
594         }
595
596         ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
597 err:
598         comp_ctxt_release(admin_queue, comp_ctx);
599         return ret;
600 }
601
602 /* This method reads a hardware device register by posting a write
603  * and waiting for the response.
604  * On timeout the function will return ENA_MMIO_READ_TIMEOUT.
605  */
606 static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
607 {
608         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
609         volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
610                 mmio_read->read_resp;
611         u32 mmio_read_reg, ret, i;
612         unsigned long flags;
613         u32 timeout = mmio_read->reg_read_to;
614
615         might_sleep();
616
617         if (timeout == 0)
618                 timeout = ENA_REG_READ_TIMEOUT;
619
620         /* If readless is disabled, perform regular read */
621         if (!mmio_read->readless_supported)
622                 return readl(ena_dev->reg_bar + offset);
623
624         spin_lock_irqsave(&mmio_read->lock, flags);
625         mmio_read->seq_num++;
626
627         read_resp->req_id = mmio_read->seq_num + 0xDEAD;
628         mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
629                         ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
630         mmio_read_reg |= mmio_read->seq_num &
631                         ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
632
633         /* make sure read_resp->req_id gets updated before the hw can write
634          * to it
635          */
636         wmb();
637
638         writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
639
640         for (i = 0; i < timeout; i++) {
641                 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
642                         break;
643
644                 udelay(1);
645         }
646
647         if (unlikely(i == timeout)) {
648                 pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
649                        mmio_read->seq_num, offset, read_resp->req_id,
650                        read_resp->reg_off);
651                 ret = ENA_MMIO_READ_TIMEOUT;
652                 goto err;
653         }
654
655         if (read_resp->reg_off != offset) {
656                 pr_err("Read failure: wrong offset provided\n");
657                 ret = ENA_MMIO_READ_TIMEOUT;
658         } else {
659                 ret = read_resp->reg_val;
660         }
661 err:
662         spin_unlock_irqrestore(&mmio_read->lock, flags);
663
664         return ret;
665 }
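
/* Illustrative sketch (comment only): the "readless" read above posts a request
 * (register offset plus sequence number) to ENA_REGS_MMIO_REG_READ_OFF and then
 * polls a DMA response buffer that the device fills in:
 *
 *     read_resp->req_id = mmio_read->seq_num + 0xDEAD;   // poison the old answer
 *     writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 *     while (READ_ONCE(read_resp->req_id) != mmio_read->seq_num)
 *             udelay(1);                                  // bounded by 'timeout'
 *
 * Adding 0xDEAD guarantees the poisoned req_id never equals the new sequence
 * number, so a match means the device really answered this request.
 */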
666
667 /* There are two ways to wait for a completion.
668  * Polling mode - poll until the completion is available.
669  * Async mode - wait on a wait queue until the completion is ready
670  * (or the timeout expires).
671  * In async mode it is expected that the IRQ handler calls
672  * ena_com_handle_admin_completion to mark the completions.
673  */
674 static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
675                                              struct ena_com_admin_queue *admin_queue)
676 {
677         if (admin_queue->polling)
678                 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
679                                                                  admin_queue);
680
681         return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
682                                                             admin_queue);
683 }
684
685 static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
686                                  struct ena_com_io_sq *io_sq)
687 {
688         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
689         struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
690         struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
691         u8 direction;
692         int ret;
693
694         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
695
696         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
697                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
698         else
699                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
700
701         destroy_cmd.sq.sq_identity |= (direction <<
702                 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
703                 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
704
705         destroy_cmd.sq.sq_idx = io_sq->idx;
706         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
707
708         ret = ena_com_execute_admin_command(admin_queue,
709                                             (struct ena_admin_aq_entry *)&destroy_cmd,
710                                             sizeof(destroy_cmd),
711                                             (struct ena_admin_acq_entry *)&destroy_resp,
712                                             sizeof(destroy_resp));
713
714         if (unlikely(ret && (ret != -ENODEV)))
715                 pr_err("Failed to destroy IO SQ. error: %d\n", ret);
716
717         return ret;
718 }
719
720 static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
721                                   struct ena_com_io_sq *io_sq,
722                                   struct ena_com_io_cq *io_cq)
723 {
724         size_t size;
725
726         if (io_cq->cdesc_addr.virt_addr) {
727                 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
728
729                 dma_free_coherent(ena_dev->dmadev, size,
730                                   io_cq->cdesc_addr.virt_addr,
731                                   io_cq->cdesc_addr.phys_addr);
732
733                 io_cq->cdesc_addr.virt_addr = NULL;
734         }
735
736         if (io_sq->desc_addr.virt_addr) {
737                 size = io_sq->desc_entry_size * io_sq->q_depth;
738
739                 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
740                         dma_free_coherent(ena_dev->dmadev, size,
741                                           io_sq->desc_addr.virt_addr,
742                                           io_sq->desc_addr.phys_addr);
743                 else
744                         devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
745
746                 io_sq->desc_addr.virt_addr = NULL;
747         }
748 }
749
750 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
751                                 u16 exp_state)
752 {
753         u32 val, i;
754
755         for (i = 0; i < timeout; i++) {
756                 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
757
758                 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
759                         pr_err("Reg read timeout occurred\n");
760                         return -ETIME;
761                 }
762
763                 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
764                         exp_state)
765                         return 0;
766
767                 /* The resolution of the timeout is 100ms */
768                 msleep(100);
769         }
770
771         return -ETIME;
772 }
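
/* Note (descriptive): the timeout parameter of wait_for_reset_state() counts
 * 100 ms polling rounds, so for example timeout = 10 gives the device roughly
 * one second to reach the expected reset state before -ETIME is returned.
 */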
773
774 static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
775                                                enum ena_admin_aq_feature_id feature_id)
776 {
777         u32 feature_mask = 1 << feature_id;
778
779         /* Device attributes are always supported */
780         if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
781             !(ena_dev->supported_features & feature_mask))
782                 return false;
783
784         return true;
785 }
786
787 static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
788                                   struct ena_admin_get_feat_resp *get_resp,
789                                   enum ena_admin_aq_feature_id feature_id,
790                                   dma_addr_t control_buf_dma_addr,
791                                   u32 control_buff_size)
792 {
793         struct ena_com_admin_queue *admin_queue;
794         struct ena_admin_get_feat_cmd get_cmd;
795         int ret;
796
797         if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
798                 pr_debug("Feature %d isn't supported\n", feature_id);
799                 return -EOPNOTSUPP;
800         }
801
802         memset(&get_cmd, 0x0, sizeof(get_cmd));
803         admin_queue = &ena_dev->admin_queue;
804
805         get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
806
807         if (control_buff_size)
808                 get_cmd.aq_common_descriptor.flags =
809                         ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
810         else
811                 get_cmd.aq_common_descriptor.flags = 0;
812
813         ret = ena_com_mem_addr_set(ena_dev,
814                                    &get_cmd.control_buffer.address,
815                                    control_buf_dma_addr);
816         if (unlikely(ret)) {
817                 pr_err("memory address set failed\n");
818                 return ret;
819         }
820
821         get_cmd.control_buffer.length = control_buff_size;
822
823         get_cmd.feat_common.feature_id = feature_id;
824
825         ret = ena_com_execute_admin_command(admin_queue,
826                                             (struct ena_admin_aq_entry *)
827                                             &get_cmd,
828                                             sizeof(get_cmd),
829                                             (struct ena_admin_acq_entry *)
830                                             get_resp,
831                                             sizeof(*get_resp));
832
833         if (unlikely(ret))
834                 pr_err("Failed to submit get_feature command %d error: %d\n",
835                        feature_id, ret);
836
837         return ret;
838 }
839
840 static int ena_com_get_feature(struct ena_com_dev *ena_dev,
841                                struct ena_admin_get_feat_resp *get_resp,
842                                enum ena_admin_aq_feature_id feature_id)
843 {
844         return ena_com_get_feature_ex(ena_dev,
845                                       get_resp,
846                                       feature_id,
847                                       0,
848                                       0);
849 }
850
851 static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
852 {
853         struct ena_admin_feature_rss_flow_hash_control *hash_key =
854                 (ena_dev->rss).hash_key;
855
856         netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
857         /* The key is stored in the device as an array of u32, and the
858          * API requires it to be passed in the same format. Thus the
859          * number of entries is the key size in bytes divided by 4.
860          */
861         hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
862 }
863
864 int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
865 {
866         return ena_dev->rss.hash_func;
867 }
868
869 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
870 {
871         struct ena_rss *rss = &ena_dev->rss;
872
873         rss->hash_key =
874                 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
875                                     &rss->hash_key_dma_addr, GFP_KERNEL);
876
877         if (unlikely(!rss->hash_key))
878                 return -ENOMEM;
879
880         return 0;
881 }
882
883 static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
884 {
885         struct ena_rss *rss = &ena_dev->rss;
886
887         if (rss->hash_key)
888                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
889                                   rss->hash_key, rss->hash_key_dma_addr);
890         rss->hash_key = NULL;
891 }
892
893 static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
894 {
895         struct ena_rss *rss = &ena_dev->rss;
896
897         rss->hash_ctrl =
898                 dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
899                                     &rss->hash_ctrl_dma_addr, GFP_KERNEL);
900
901         if (unlikely(!rss->hash_ctrl))
902                 return -ENOMEM;
903
904         return 0;
905 }
906
907 static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
908 {
909         struct ena_rss *rss = &ena_dev->rss;
910
911         if (rss->hash_ctrl)
912                 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
913                                   rss->hash_ctrl, rss->hash_ctrl_dma_addr);
914         rss->hash_ctrl = NULL;
915 }
916
917 static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
918                                            u16 log_size)
919 {
920         struct ena_rss *rss = &ena_dev->rss;
921         struct ena_admin_get_feat_resp get_resp;
922         size_t tbl_size;
923         int ret;
924
925         ret = ena_com_get_feature(ena_dev, &get_resp,
926                                   ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
927         if (unlikely(ret))
928                 return ret;
929
930         if ((get_resp.u.ind_table.min_size > log_size) ||
931             (get_resp.u.ind_table.max_size < log_size)) {
932                 pr_err("indirect table size doesn't fit. requested size: %d while min is: %d and max is: %d\n",
933                        1 << log_size, 1 << get_resp.u.ind_table.min_size,
934                        1 << get_resp.u.ind_table.max_size);
935                 return -EINVAL;
936         }
937
938         tbl_size = (1ULL << log_size) *
939                 sizeof(struct ena_admin_rss_ind_table_entry);
940
941         rss->rss_ind_tbl =
942                 dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
943                                     &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
944         if (unlikely(!rss->rss_ind_tbl))
945                 goto mem_err1;
946
947         tbl_size = (1ULL << log_size) * sizeof(u16);
948         rss->host_rss_ind_tbl =
949                 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
950         if (unlikely(!rss->host_rss_ind_tbl))
951                 goto mem_err2;
952
953         rss->tbl_log_size = log_size;
954
955         return 0;
956
957 mem_err2:
958         tbl_size = (1ULL << log_size) *
959                 sizeof(struct ena_admin_rss_ind_table_entry);
960
961         dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
962                           rss->rss_ind_tbl_dma_addr);
963         rss->rss_ind_tbl = NULL;
964 mem_err1:
965         rss->tbl_log_size = 0;
966         return -ENOMEM;
967 }
968
969 static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
970 {
971         struct ena_rss *rss = &ena_dev->rss;
972         size_t tbl_size = (1ULL << rss->tbl_log_size) *
973                 sizeof(struct ena_admin_rss_ind_table_entry);
974
975         if (rss->rss_ind_tbl)
976                 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
977                                   rss->rss_ind_tbl_dma_addr);
978         rss->rss_ind_tbl = NULL;
979
980         if (rss->host_rss_ind_tbl)
981                 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
982         rss->host_rss_ind_tbl = NULL;
983 }
984
985 static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
986                                 struct ena_com_io_sq *io_sq, u16 cq_idx)
987 {
988         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
989         struct ena_admin_aq_create_sq_cmd create_cmd;
990         struct ena_admin_acq_create_sq_resp_desc cmd_completion;
991         u8 direction;
992         int ret;
993
994         memset(&create_cmd, 0x0, sizeof(create_cmd));
995
996         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
997
998         if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
999                 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1000         else
1001                 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1002
1003         create_cmd.sq_identity |= (direction <<
1004                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1005                 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1006
1007         create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1008                 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1009
1010         create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1011                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1012                 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1013
1014         create_cmd.sq_caps_3 |=
1015                 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1016
1017         create_cmd.cq_idx = cq_idx;
1018         create_cmd.sq_depth = io_sq->q_depth;
1019
1020         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1021                 ret = ena_com_mem_addr_set(ena_dev,
1022                                            &create_cmd.sq_ba,
1023                                            io_sq->desc_addr.phys_addr);
1024                 if (unlikely(ret)) {
1025                         pr_err("memory address set failed\n");
1026                         return ret;
1027                 }
1028         }
1029
1030         ret = ena_com_execute_admin_command(admin_queue,
1031                                             (struct ena_admin_aq_entry *)&create_cmd,
1032                                             sizeof(create_cmd),
1033                                             (struct ena_admin_acq_entry *)&cmd_completion,
1034                                             sizeof(cmd_completion));
1035         if (unlikely(ret)) {
1036                 pr_err("Failed to create IO SQ. error: %d\n", ret);
1037                 return ret;
1038         }
1039
1040         io_sq->idx = cmd_completion.sq_idx;
1041
1042         io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1043                 (uintptr_t)cmd_completion.sq_doorbell_offset);
1044
1045         if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1046                 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1047                                 + cmd_completion.llq_headers_offset);
1048
1049                 io_sq->desc_addr.pbuf_dev_addr =
1050                         (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1051                         cmd_completion.llq_descriptors_offset);
1052         }
1053
1054         pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1055
1056         return ret;
1057 }
1058
1059 static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1060 {
1061         struct ena_rss *rss = &ena_dev->rss;
1062         struct ena_com_io_sq *io_sq;
1063         u16 qid;
1064         int i;
1065
1066         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1067                 qid = rss->host_rss_ind_tbl[i];
1068                 if (qid >= ENA_TOTAL_NUM_QUEUES)
1069                         return -EINVAL;
1070
1071                 io_sq = &ena_dev->io_sq_queues[qid];
1072
1073                 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1074                         return -EINVAL;
1075
1076                 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1077         }
1078
1079         return 0;
1080 }
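
/* Note (descriptive): the RSS indirection table exists in two forms. The host
 * copy (host_rss_ind_tbl) holds driver queue ids, while the table handed to the
 * device (rss_ind_tbl) must hold the device-assigned idx of the matching Rx SQ;
 * ena_com_ind_tbl_convert_to_device() translates the former into the latter and
 * ena_com_ind_tbl_convert_from_device() below performs the reverse mapping.
 */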
1081
1082 static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1083 {
1084         u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1085         struct ena_rss *rss = &ena_dev->rss;
1086         u8 idx;
1087         u16 i;
1088
1089         for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1090                 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1091
1092         for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1093                 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1094                         return -EINVAL;
1095                 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1096
1097                 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1098                         return -EINVAL;
1099
1100                 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1101         }
1102
1103         return 0;
1104 }
1105
1106 static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1107 {
1108         size_t size;
1109
1110         size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1111
1112         ena_dev->intr_moder_tbl =
1113                 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
1114         if (!ena_dev->intr_moder_tbl)
1115                 return -ENOMEM;
1116
1117         ena_com_config_default_interrupt_moderation_table(ena_dev);
1118
1119         return 0;
1120 }
1121
1122 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1123                                                  u16 intr_delay_resolution)
1124 {
1125         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1126         unsigned int i;
1127
1128         if (!intr_delay_resolution) {
1129                 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1130                 intr_delay_resolution = 1;
1131         }
1132         ena_dev->intr_delay_resolution = intr_delay_resolution;
1133
1134         /* update Rx */
1135         for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1136                 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1137
1138         /* update Tx */
1139         ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1140 }
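
/* Illustrative example (the numbers are hypothetical, not taken from any
 * device): after this call the moderation intervals are stored in device
 * units. If the device reports intr_delay_resolution = 2 (each unit is 2 usec),
 * an entry configured as 64 usec becomes 64 / 2 = 32 units, and the same
 * scaling is applied to intr_moder_tx_interval.
 */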
1141
1142 /*****************************************************************************/
1143 /*******************************      API       ******************************/
1144 /*****************************************************************************/
1145
1146 int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1147                                   struct ena_admin_aq_entry *cmd,
1148                                   size_t cmd_size,
1149                                   struct ena_admin_acq_entry *comp,
1150                                   size_t comp_size)
1151 {
1152         struct ena_comp_ctx *comp_ctx;
1153         int ret;
1154
1155         comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1156                                             comp, comp_size);
1157         if (unlikely(IS_ERR(comp_ctx))) {
1158                 if (comp_ctx == ERR_PTR(-ENODEV))
1159                         pr_debug("Failed to submit command [%ld]\n",
1160                                  PTR_ERR(comp_ctx));
1161                 else
1162                         pr_err("Failed to submit command [%ld]\n",
1163                                PTR_ERR(comp_ctx));
1164
1165                 return PTR_ERR(comp_ctx);
1166         }
1167
1168         ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1169         if (unlikely(ret)) {
1170                 if (admin_queue->running_state)
1171                         pr_err("Failed to process command. ret = %d\n", ret);
1172                 else
1173                         pr_debug("Failed to process command. ret = %d\n", ret);
1174         }
1175         return ret;
1176 }
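
/* Usage sketch (comment only, mirrors callers such as ena_com_set_aenq_config()
 * later in this file): a caller builds a specific command on the stack, casts
 * it to the generic AQ/ACQ entry types and lets this helper submit and wait:
 *
 *     struct ena_admin_set_feat_cmd cmd = {};
 *     struct ena_admin_set_feat_resp resp;
 *     int ret;
 *
 *     cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *     ret = ena_com_execute_admin_command(admin_queue,
 *                                         (struct ena_admin_aq_entry *)&cmd,
 *                                         sizeof(cmd),
 *                                         (struct ena_admin_acq_entry *)&resp,
 *                                         sizeof(resp));
 */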
1177
1178 int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1179                          struct ena_com_io_cq *io_cq)
1180 {
1181         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1182         struct ena_admin_aq_create_cq_cmd create_cmd;
1183         struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1184         int ret;
1185
1186         memset(&create_cmd, 0x0, sizeof(create_cmd));
1187
1188         create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1189
1190         create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1191                 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1192         create_cmd.cq_caps_1 |=
1193                 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1194
1195         create_cmd.msix_vector = io_cq->msix_vector;
1196         create_cmd.cq_depth = io_cq->q_depth;
1197
1198         ret = ena_com_mem_addr_set(ena_dev,
1199                                    &create_cmd.cq_ba,
1200                                    io_cq->cdesc_addr.phys_addr);
1201         if (unlikely(ret)) {
1202                 pr_err("memory address set failed\n");
1203                 return ret;
1204         }
1205
1206         ret = ena_com_execute_admin_command(admin_queue,
1207                                             (struct ena_admin_aq_entry *)&create_cmd,
1208                                             sizeof(create_cmd),
1209                                             (struct ena_admin_acq_entry *)&cmd_completion,
1210                                             sizeof(cmd_completion));
1211         if (unlikely(ret)) {
1212                 pr_err("Failed to create IO CQ. error: %d\n", ret);
1213                 return ret;
1214         }
1215
1216         io_cq->idx = cmd_completion.cq_idx;
1217
1218         io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1219                 cmd_completion.cq_interrupt_unmask_register_offset);
1220
1221         if (cmd_completion.cq_head_db_register_offset)
1222                 io_cq->cq_head_db_reg =
1223                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1224                         cmd_completion.cq_head_db_register_offset);
1225
1226         if (cmd_completion.numa_node_register_offset)
1227                 io_cq->numa_node_cfg_reg =
1228                         (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1229                         cmd_completion.numa_node_register_offset);
1230
1231         pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1232
1233         return ret;
1234 }
1235
1236 int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1237                             struct ena_com_io_sq **io_sq,
1238                             struct ena_com_io_cq **io_cq)
1239 {
1240         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1241                 pr_err("Invalid queue number %d but the max is %d\n", qid,
1242                        ENA_TOTAL_NUM_QUEUES);
1243                 return -EINVAL;
1244         }
1245
1246         *io_sq = &ena_dev->io_sq_queues[qid];
1247         *io_cq = &ena_dev->io_cq_queues[qid];
1248
1249         return 0;
1250 }
1251
1252 void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1253 {
1254         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1255         struct ena_comp_ctx *comp_ctx;
1256         u16 i;
1257
1258         if (!admin_queue->comp_ctx)
1259                 return;
1260
1261         for (i = 0; i < admin_queue->q_depth; i++) {
1262                 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1263                 if (unlikely(!comp_ctx))
1264                         break;
1265
1266                 comp_ctx->status = ENA_CMD_ABORTED;
1267
1268                 complete(&comp_ctx->wait_event);
1269         }
1270 }
1271
1272 void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1273 {
1274         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1275         unsigned long flags;
1276
1277         spin_lock_irqsave(&admin_queue->q_lock, flags);
1278         while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1279                 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1280                 msleep(20);
1281                 spin_lock_irqsave(&admin_queue->q_lock, flags);
1282         }
1283         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1284 }
1285
1286 int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1287                           struct ena_com_io_cq *io_cq)
1288 {
1289         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1290         struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1291         struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1292         int ret;
1293
1294         memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1295
1296         destroy_cmd.cq_idx = io_cq->idx;
1297         destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1298
1299         ret = ena_com_execute_admin_command(admin_queue,
1300                                             (struct ena_admin_aq_entry *)&destroy_cmd,
1301                                             sizeof(destroy_cmd),
1302                                             (struct ena_admin_acq_entry *)&destroy_resp,
1303                                             sizeof(destroy_resp));
1304
1305         if (unlikely(ret && (ret != -ENODEV)))
1306                 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1307
1308         return ret;
1309 }
1310
1311 bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1312 {
1313         return ena_dev->admin_queue.running_state;
1314 }
1315
1316 void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1317 {
1318         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1319         unsigned long flags;
1320
1321         spin_lock_irqsave(&admin_queue->q_lock, flags);
1322         ena_dev->admin_queue.running_state = state;
1323         spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1324 }
1325
1326 void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1327 {
1328         u16 depth = ena_dev->aenq.q_depth;
1329
1330         WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1331
1332         /* Init head_db to mark that all entries in the queue
1333          * are initially available
1334          */
1335         writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1336 }
1337
1338 int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1339 {
1340         struct ena_com_admin_queue *admin_queue;
1341         struct ena_admin_set_feat_cmd cmd;
1342         struct ena_admin_set_feat_resp resp;
1343         struct ena_admin_get_feat_resp get_resp;
1344         int ret;
1345
1346         ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
1347         if (ret) {
1348                 pr_info("Can't get aenq configuration\n");
1349                 return ret;
1350         }
1351
1352         if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1353                 pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
1354                         get_resp.u.aenq.supported_groups, groups_flag);
1355                 return -EOPNOTSUPP;
1356         }
1357
1358         memset(&cmd, 0x0, sizeof(cmd));
1359         admin_queue = &ena_dev->admin_queue;
1360
1361         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1362         cmd.aq_common_descriptor.flags = 0;
1363         cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1364         cmd.u.aenq.enabled_groups = groups_flag;
1365
1366         ret = ena_com_execute_admin_command(admin_queue,
1367                                             (struct ena_admin_aq_entry *)&cmd,
1368                                             sizeof(cmd),
1369                                             (struct ena_admin_acq_entry *)&resp,
1370                                             sizeof(resp));
1371
1372         if (unlikely(ret))
1373                 pr_err("Failed to config AENQ ret: %d\n", ret);
1374
1375         return ret;
1376 }
1377
1378 int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1379 {
1380         u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1381         int width;
1382
1383         if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1384                 pr_err("Reg read timeout occurred\n");
1385                 return -ETIME;
1386         }
1387
1388         width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1389                 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1390
1391         pr_debug("ENA dma width: %d\n", width);
1392
1393         if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1394                 pr_err("DMA width illegal value: %d\n", width);
1395                 return -EINVAL;
1396         }
1397
1398         ena_dev->dma_addr_bits = width;
1399
1400         return width;
1401 }
1402
1403 int ena_com_validate_version(struct ena_com_dev *ena_dev)
1404 {
1405         u32 ver;
1406         u32 ctrl_ver;
1407         u32 ctrl_ver_masked;
1408
1409         /* Make sure the ENA version and the controller version are at least
1410          * the versions the driver expects
1411          */
1412         ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1413         ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1414                                           ENA_REGS_CONTROLLER_VERSION_OFF);
1415
1416         if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1417                      (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1418                 pr_err("Reg read timeout occurred\n");
1419                 return -ETIME;
1420         }
1421
1422         pr_info("ena device version: %d.%d\n",
1423                 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1424                         ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1425                 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1426
1427         if (ver < MIN_ENA_VER) {
1428                 pr_err("ENA version is lower than the minimal version the driver supports\n");
1429                 return -1;
1430         }
1431
1432         pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1433                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1434                         ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1435                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1436                         ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1437                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1438                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1439                         ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1440
1441         ctrl_ver_masked =
1442                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1443                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1444                 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1445
1446         /* Validate the ctrl version without the implementation ID */
1447         if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1448                 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1449                 return -1;
1450         }
1451
1452         return 0;
1453 }
1454
1455 void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1456 {
1457         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1458         struct ena_com_admin_cq *cq = &admin_queue->cq;
1459         struct ena_com_admin_sq *sq = &admin_queue->sq;
1460         struct ena_com_aenq *aenq = &ena_dev->aenq;
1461         u16 size;
1462
1463         if (admin_queue->comp_ctx)
1464                 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1465         admin_queue->comp_ctx = NULL;
1466         size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1467         if (sq->entries)
1468                 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1469                                   sq->dma_addr);
1470         sq->entries = NULL;
1471
1472         size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1473         if (cq->entries)
1474                 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1475                                   cq->dma_addr);
1476         cq->entries = NULL;
1477
1478         size = ADMIN_AENQ_SIZE(aenq->q_depth);
1479         if (ena_dev->aenq.entries)
1480                 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1481                                   aenq->dma_addr);
1482         aenq->entries = NULL;
1483 }
1484
1485 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1486 {
1487         u32 mask_value = 0;
1488
1489         if (polling)
1490                 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1491
1492         writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1493         ena_dev->admin_queue.polling = polling;
1494 }
1495
1496 int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1497 {
1498         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1499
1500         spin_lock_init(&mmio_read->lock);
1501         mmio_read->read_resp =
1502                 dma_zalloc_coherent(ena_dev->dmadev,
1503                                     sizeof(*mmio_read->read_resp),
1504                                     &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1505         if (unlikely(!mmio_read->read_resp))
1506                 return -ENOMEM;
1507
1508         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1509
1510         mmio_read->read_resp->req_id = 0x0;
1511         mmio_read->seq_num = 0x0;
1512         mmio_read->readless_supported = true;
1513
1514         return 0;
1515 }
1516
1517 void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1518 {
1519         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1520
1521         mmio_read->readless_supported = readless_supported;
1522 }
1523
1524 void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1525 {
1526         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1527
1528         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1529         writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1530
1531         dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1532                           mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1533
1534         mmio_read->read_resp = NULL;
1535 }
1536
1537 void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1538 {
1539         struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1540         u32 addr_low, addr_high;
1541
1542         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1543         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1544
1545         writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1546         writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1547 }
1548
1549 int ena_com_admin_init(struct ena_com_dev *ena_dev,
1550                        struct ena_aenq_handlers *aenq_handlers,
1551                        bool init_spinlock)
1552 {
1553         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1554         u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1555         int ret;
1556
1557         dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1558
1559         if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1560                 pr_err("Reg read timeout occurred\n");
1561                 return -ETIME;
1562         }
1563
1564         if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1565                 pr_err("Device isn't ready, abort com init\n");
1566                 return -ENODEV;
1567         }
1568
1569         admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1570
1571         admin_queue->q_dmadev = ena_dev->dmadev;
1572         admin_queue->polling = false;
1573         admin_queue->curr_cmd_id = 0;
1574
1575         atomic_set(&admin_queue->outstanding_cmds, 0);
1576
1577         if (init_spinlock)
1578                 spin_lock_init(&admin_queue->q_lock);
1579
1580         ret = ena_com_init_comp_ctxt(admin_queue);
1581         if (ret)
1582                 goto error;
1583
1584         ret = ena_com_admin_init_sq(admin_queue);
1585         if (ret)
1586                 goto error;
1587
1588         ret = ena_com_admin_init_cq(admin_queue);
1589         if (ret)
1590                 goto error;
1591
1592         admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1593                 ENA_REGS_AQ_DB_OFF);
1594
1595         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1596         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1597
1598         writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1599         writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1600
1601         addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1602         addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1603
1604         writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1605         writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1606
1607         aq_caps = 0;
1608         aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1609         aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1610                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1611                         ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1612
1613         acq_caps = 0;
1614         acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1615         acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1616                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1617                 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1618
1619         writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1620         writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1621         ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1622         if (ret)
1623                 goto error;
1624
1625         admin_queue->running_state = true;
1626
1627         return 0;
1628 error:
1629         ena_com_admin_destroy(ena_dev);
1630
1631         return ret;
1632 }
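
/*
 * Illustrative bring-up sketch (hedged, not part of the upstream file): a
 * probe path typically strings the helpers in this file together in
 * roughly the following order; error handling is omitted, and "readless",
 * "aenq_handlers" and "get_feat_ctx" are caller-side placeholders:
 *
 *	rc = ena_com_mmio_reg_read_request_init(ena_dev);
 *	ena_com_set_mmio_read_mode(ena_dev, readless);
 *	rc = ena_com_dev_reset(ena_dev, ENA_REGS_RESET_NORMAL);
 *	rc = ena_com_validate_version(ena_dev);
 *	rc = ena_com_get_dma_width(ena_dev);
 *	rc = ena_com_admin_init(ena_dev, &aenq_handlers, true);
 *	rc = ena_com_get_dev_attr_feat(ena_dev, &get_feat_ctx);
 *
 * On failure the teardown mirrors the init order: ena_com_admin_destroy()
 * and then ena_com_mmio_reg_read_request_destroy().
 */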
1633
1634 int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1635                             struct ena_com_create_io_ctx *ctx)
1636 {
1637         struct ena_com_io_sq *io_sq;
1638         struct ena_com_io_cq *io_cq;
1639         int ret;
1640
1641         if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1642                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1643                        ctx->qid, ENA_TOTAL_NUM_QUEUES);
1644                 return -EINVAL;
1645         }
1646
1647         io_sq = &ena_dev->io_sq_queues[ctx->qid];
1648         io_cq = &ena_dev->io_cq_queues[ctx->qid];
1649
1650         memset(io_sq, 0x0, sizeof(*io_sq));
1651         memset(io_cq, 0x0, sizeof(*io_cq));
1652
1653         /* Init CQ */
1654         io_cq->q_depth = ctx->queue_size;
1655         io_cq->direction = ctx->direction;
1656         io_cq->qid = ctx->qid;
1657
1658         io_cq->msix_vector = ctx->msix_vector;
1659
1660         io_sq->q_depth = ctx->queue_size;
1661         io_sq->direction = ctx->direction;
1662         io_sq->qid = ctx->qid;
1663
1664         io_sq->mem_queue_type = ctx->mem_queue_type;
1665
1666         if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1667                 /* header length is limited to 8 bits */
1668                 io_sq->tx_max_header_size =
1669                         min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1670
1671         ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1672         if (ret)
1673                 goto error;
1674         ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1675         if (ret)
1676                 goto error;
1677
1678         ret = ena_com_create_io_cq(ena_dev, io_cq);
1679         if (ret)
1680                 goto error;
1681
1682         ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1683         if (ret)
1684                 goto destroy_io_cq;
1685
1686         return 0;
1687
1688 destroy_io_cq:
1689         ena_com_destroy_io_cq(ena_dev, io_cq);
1690 error:
1691         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1692         return ret;
1693 }
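
/*
 * Illustrative usage sketch (hedged, not part of the upstream file): a
 * caller creates a queue by filling a struct ena_com_create_io_ctx; qid,
 * queue_size and msix_vector below are placeholders, and host-memory
 * placement is assumed:
 *
 *	struct ena_com_io_sq *io_sq;
 *	struct ena_com_io_cq *io_cq;
 *	struct ena_com_create_io_ctx ctx = {
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.qid		= qid,
 *		.queue_size	= queue_size,
 *		.msix_vector	= msix_vector,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 *	if (!rc)
 *		rc = ena_com_get_io_handlers(ena_dev, qid, &io_sq, &io_cq);
 *
 * On error, or when the queue is no longer needed, it is torn down with
 * ena_com_destroy_io_queue(ena_dev, qid).
 */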
1694
1695 void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1696 {
1697         struct ena_com_io_sq *io_sq;
1698         struct ena_com_io_cq *io_cq;
1699
1700         if (qid >= ENA_TOTAL_NUM_QUEUES) {
1701                 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1702                        ENA_TOTAL_NUM_QUEUES);
1703                 return;
1704         }
1705
1706         io_sq = &ena_dev->io_sq_queues[qid];
1707         io_cq = &ena_dev->io_cq_queues[qid];
1708
1709         ena_com_destroy_io_sq(ena_dev, io_sq);
1710         ena_com_destroy_io_cq(ena_dev, io_cq);
1711
1712         ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1713 }
1714
1715 int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1716                             struct ena_admin_get_feat_resp *resp)
1717 {
1718         return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1719 }
1720
1721 int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1722                               struct ena_com_dev_get_features_ctx *get_feat_ctx)
1723 {
1724         struct ena_admin_get_feat_resp get_resp;
1725         int rc;
1726
1727         rc = ena_com_get_feature(ena_dev, &get_resp,
1728                                  ENA_ADMIN_DEVICE_ATTRIBUTES);
1729         if (rc)
1730                 return rc;
1731
1732         memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1733                sizeof(get_resp.u.dev_attr));
1734         ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1735
1736         rc = ena_com_get_feature(ena_dev, &get_resp,
1737                                  ENA_ADMIN_MAX_QUEUES_NUM);
1738         if (rc)
1739                 return rc;
1740
1741         memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1742                sizeof(get_resp.u.max_queue));
1743         ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1744
1745         rc = ena_com_get_feature(ena_dev, &get_resp,
1746                                  ENA_ADMIN_AENQ_CONFIG);
1747         if (rc)
1748                 return rc;
1749
1750         memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1751                sizeof(get_resp.u.aenq));
1752
1753         rc = ena_com_get_feature(ena_dev, &get_resp,
1754                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1755         if (rc)
1756                 return rc;
1757
1758         memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1759                sizeof(get_resp.u.offload));
1760
1761         /* The driver hints admin command isn't mandatory, so if the
1762          * command isn't supported, set the driver hints to 0
1763          */
1764         rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
1765
1766         if (!rc)
1767                 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1768                        sizeof(get_resp.u.hw_hints));
1769         else if (rc == -EOPNOTSUPP)
1770                 memset(&get_feat_ctx->hw_hints, 0x0,
1771                        sizeof(get_feat_ctx->hw_hints));
1772         else
1773                 return rc;
1774
1775         return 0;
1776 }
1777
1778 void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1779 {
1780         ena_com_handle_admin_completion(&ena_dev->admin_queue);
1781 }
1782
1783 /* ena_com_get_specific_aenq_cb:
1784  * return the handler that is relevant to the specific event group
1785  */
1786 static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1787                                                      u16 group)
1788 {
1789         struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1790
1791         if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1792                 return aenq_handlers->handlers[group];
1793
1794         return aenq_handlers->unimplemented_handler;
1795 }
1796
1797 /* ena_com_aenq_intr_handler:
1798  * handles the incoming AENQ events:
1799  * pops events from the queue and applies the matching handler
1800  */
1801 void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
1802 {
1803         struct ena_admin_aenq_entry *aenq_e;
1804         struct ena_admin_aenq_common_desc *aenq_common;
1805         struct ena_com_aenq *aenq  = &dev->aenq;
1806         ena_aenq_handler handler_cb;
1807         u16 masked_head, processed = 0;
1808         u8 phase;
1809
1810         masked_head = aenq->head & (aenq->q_depth - 1);
1811         phase = aenq->phase;
1812         aenq_e = &aenq->entries[masked_head]; /* Get first entry */
1813         aenq_common = &aenq_e->aenq_common_desc;
1814
1815         /* Go over all the events */
1816         while ((READ_ONCE(aenq_common->flags) &
1817                 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
1818                 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
1819                          aenq_common->group, aenq_common->syndrom,
1820                          (u64)aenq_common->timestamp_low +
1821                                  ((u64)aenq_common->timestamp_high << 32));
1822
1823                 /* Handle specific event */
1824                 handler_cb = ena_com_get_specific_aenq_cb(dev,
1825                                                           aenq_common->group);
1826                 handler_cb(data, aenq_e); /* call the actual event handler */
1827
1828                 /* Get next event entry */
1829                 masked_head++;
1830                 processed++;
1831
1832                 if (unlikely(masked_head == aenq->q_depth)) {
1833                         masked_head = 0;
1834                         phase = !phase;
1835                 }
1836                 aenq_e = &aenq->entries[masked_head];
1837                 aenq_common = &aenq_e->aenq_common_desc;
1838         }
1839
1840         aenq->head += processed;
1841         aenq->phase = phase;
1842
1843         /* Don't update aenq doorbell if there weren't any processed events */
1844         if (!processed)
1845                 return;
1846
1847         /* write the aenq doorbell after all AENQ descriptors were read */
1848         mb();
1849         writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1850 }
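
/*
 * Illustrative sketch (hedged, not part of the upstream file): the handler
 * table consumed above is the struct ena_aenq_handlers that the caller
 * passes to ena_com_admin_init(); the example_* names are placeholders:
 *
 *	static void example_link_change_cb(void *data,
 *					   struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		// react to ENA_ADMIN_LINK_CHANGE events
 *	}
 *
 *	static void example_unimplemented_cb(void *data,
 *					     struct ena_admin_aenq_entry *aenq_e)
 *	{
 *		// fallback for groups without a dedicated handler
 *	}
 *
 *	static struct ena_aenq_handlers example_aenq_handlers = {
 *		.handlers = {
 *			[ENA_ADMIN_LINK_CHANGE] = example_link_change_cb,
 *		},
 *		.unimplemented_handler = example_unimplemented_cb,
 *	};
 */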
1851
1852 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
1853                       enum ena_regs_reset_reason_types reset_reason)
1854 {
1855         u32 stat, timeout, cap, reset_val;
1856         int rc;
1857
1858         stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1859         cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1860
1861         if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
1862                      (cap == ENA_MMIO_READ_TIMEOUT))) {
1863                 pr_err("Reg read32 timeout occurred\n");
1864                 return -ETIME;
1865         }
1866
1867         if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
1868                 pr_err("Device isn't ready, can't reset device\n");
1869                 return -EINVAL;
1870         }
1871
1872         timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
1873                         ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
1874         if (timeout == 0) {
1875                 pr_err("Invalid timeout value\n");
1876                 return -EINVAL;
1877         }
1878
1879         /* start reset */
1880         reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
1881         reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
1882                      ENA_REGS_DEV_CTL_RESET_REASON_MASK;
1883         writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1884
1885         /* Write the MMIO read request address again */
1886         ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1887
1888         rc = wait_for_reset_state(ena_dev, timeout,
1889                                   ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
1890         if (rc != 0) {
1891                 pr_err("Reset indication didn't turn on\n");
1892                 return rc;
1893         }
1894
1895         /* reset done */
1896         writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
1897         rc = wait_for_reset_state(ena_dev, timeout, 0);
1898         if (rc != 0) {
1899                 pr_err("Reset indication didn't turn off\n");
1900                 return rc;
1901         }
1902
1903         timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
1904                 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
1905         if (timeout)
1906                 /* the resolution of timeout reg is 100ms */
1907                 ena_dev->admin_queue.completion_timeout = timeout * 100000;
1908         else
1909                 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
1910
1911         return 0;
1912 }
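
/*
 * Illustrative recovery sketch (hedged, not part of the upstream file): a
 * reset is usually bracketed by quiescing the admin queue first and
 * tearing the communication layer down afterwards, roughly:
 *
 *	ena_com_set_admin_running_state(ena_dev, false);
 *	ena_com_dev_reset(ena_dev, reset_reason);
 *	ena_com_abort_admin_commands(ena_dev);
 *	ena_com_wait_for_abort_completion(ena_dev);
 *	ena_com_admin_destroy(ena_dev);
 *	ena_com_mmio_reg_read_request_destroy(ena_dev);
 *
 * followed by the same bring-up sequence used at probe time.
 */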
1913
1914 static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
1915                              struct ena_com_stats_ctx *ctx,
1916                              enum ena_admin_get_stats_type type)
1917 {
1918         struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
1919         struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
1920         struct ena_com_admin_queue *admin_queue;
1921         int ret;
1922
1923         admin_queue = &ena_dev->admin_queue;
1924
1925         get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
1926         get_cmd->aq_common_descriptor.flags = 0;
1927         get_cmd->type = type;
1928
1929         ret =  ena_com_execute_admin_command(admin_queue,
1930                                              (struct ena_admin_aq_entry *)get_cmd,
1931                                              sizeof(*get_cmd),
1932                                              (struct ena_admin_acq_entry *)get_resp,
1933                                              sizeof(*get_resp));
1934
1935         if (unlikely(ret))
1936                 pr_err("Failed to get stats. error: %d\n", ret);
1937
1938         return ret;
1939 }
1940
1941 int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
1942                                 struct ena_admin_basic_stats *stats)
1943 {
1944         struct ena_com_stats_ctx ctx;
1945         int ret;
1946
1947         memset(&ctx, 0x0, sizeof(ctx));
1948         ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
1949         if (likely(ret == 0))
1950                 memcpy(stats, &ctx.get_resp.basic_stats,
1951                        sizeof(ctx.get_resp.basic_stats));
1952
1953         return ret;
1954 }
1955
1956 int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
1957 {
1958         struct ena_com_admin_queue *admin_queue;
1959         struct ena_admin_set_feat_cmd cmd;
1960         struct ena_admin_set_feat_resp resp;
1961         int ret;
1962
1963         if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
1964                 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
1965                 return -EOPNOTSUPP;
1966         }
1967
1968         memset(&cmd, 0x0, sizeof(cmd));
1969         admin_queue = &ena_dev->admin_queue;
1970
1971         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1972         cmd.aq_common_descriptor.flags = 0;
1973         cmd.feat_common.feature_id = ENA_ADMIN_MTU;
1974         cmd.u.mtu.mtu = mtu;
1975
1976         ret = ena_com_execute_admin_command(admin_queue,
1977                                             (struct ena_admin_aq_entry *)&cmd,
1978                                             sizeof(cmd),
1979                                             (struct ena_admin_acq_entry *)&resp,
1980                                             sizeof(resp));
1981
1982         if (unlikely(ret))
1983                 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
1984
1985         return ret;
1986 }
1987
1988 int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
1989                                  struct ena_admin_feature_offload_desc *offload)
1990 {
1991         int ret;
1992         struct ena_admin_get_feat_resp resp;
1993
1994         ret = ena_com_get_feature(ena_dev, &resp,
1995                                   ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1996         if (unlikely(ret)) {
1997                 pr_err("Failed to get offload capabilities %d\n", ret);
1998                 return ret;
1999         }
2000
2001         memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2002
2003         return 0;
2004 }
2005
2006 int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2007 {
2008         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2009         struct ena_rss *rss = &ena_dev->rss;
2010         struct ena_admin_set_feat_cmd cmd;
2011         struct ena_admin_set_feat_resp resp;
2012         struct ena_admin_get_feat_resp get_resp;
2013         int ret;
2014
2015         if (!ena_com_check_supported_feature_id(ena_dev,
2016                                                 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2017                 pr_debug("Feature %d isn't supported\n",
2018                          ENA_ADMIN_RSS_HASH_FUNCTION);
2019                 return -EOPNOTSUPP;
2020         }
2021
2022         /* Validate hash function is supported */
2023         ret = ena_com_get_feature(ena_dev, &get_resp,
2024                                   ENA_ADMIN_RSS_HASH_FUNCTION);
2025         if (unlikely(ret))
2026                 return ret;
2027
2028         if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2029                 pr_err("Func hash %d isn't supported by device, abort\n",
2030                        rss->hash_func);
2031                 return -EOPNOTSUPP;
2032         }
2033
2034         memset(&cmd, 0x0, sizeof(cmd));
2035
2036         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2037         cmd.aq_common_descriptor.flags =
2038                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2039         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2040         cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2041         cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2042
2043         ret = ena_com_mem_addr_set(ena_dev,
2044                                    &cmd.control_buffer.address,
2045                                    rss->hash_key_dma_addr);
2046         if (unlikely(ret)) {
2047                 pr_err("memory address set failed\n");
2048                 return ret;
2049         }
2050
2051         cmd.control_buffer.length = sizeof(*rss->hash_key);
2052
2053         ret = ena_com_execute_admin_command(admin_queue,
2054                                             (struct ena_admin_aq_entry *)&cmd,
2055                                             sizeof(cmd),
2056                                             (struct ena_admin_acq_entry *)&resp,
2057                                             sizeof(resp));
2058         if (unlikely(ret)) {
2059                 pr_err("Failed to set hash function %d. error: %d\n",
2060                        rss->hash_func, ret);
2061                 return -EINVAL;
2062         }
2063
2064         return 0;
2065 }
2066
2067 int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2068                                enum ena_admin_hash_functions func,
2069                                const u8 *key, u16 key_len, u32 init_val)
2070 {
2071         struct ena_rss *rss = &ena_dev->rss;
2072         struct ena_admin_get_feat_resp get_resp;
2073         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2074                 rss->hash_key;
2075         int rc;
2076
2077         /* Make sure the key size is a multiple of DWORDs */
2078         if (unlikely(key_len & 0x3))
2079                 return -EINVAL;
2080
2081         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2082                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2083                                     rss->hash_key_dma_addr,
2084                                     sizeof(*rss->hash_key));
2085         if (unlikely(rc))
2086                 return rc;
2087
2088         if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2089                 pr_err("Flow hash function %d isn't supported\n", func);
2090                 return -EOPNOTSUPP;
2091         }
2092
2093         switch (func) {
2094         case ENA_ADMIN_TOEPLITZ:
2095                 if (key) {
2096                         if (key_len != sizeof(hash_key->key)) {
2097                                 pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2098                                        key_len, sizeof(hash_key->key));
2099                                 return -EINVAL;
2100                         }
2101                         memcpy(hash_key->key, key, key_len);
2102                         rss->hash_init_val = init_val;
2103                         hash_key->keys_num = key_len >> 2;
2104                 }
2105                 break;
2106         case ENA_ADMIN_CRC32:
2107                 rss->hash_init_val = init_val;
2108                 break;
2109         default:
2110                 pr_err("Invalid hash function (%d)\n", func);
2111                 return -EINVAL;
2112         }
2113
2114         rss->hash_func = func;
2115         rc = ena_com_set_hash_function(ena_dev);
2116
2117         /* Restore the old function */
2118         if (unlikely(rc))
2119                 ena_com_get_hash_function(ena_dev, NULL, NULL);
2120
2121         return rc;
2122 }
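
/*
 * Illustrative usage sketch (hedged, not part of the upstream file):
 * selecting Toeplitz with a caller-supplied key (key, key_len and init_val
 * are placeholders; key_len must be a multiple of 4 and, when a key is
 * given, equal to sizeof(hash_key->key)):
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					key, key_len, init_val);
 *
 * For ENA_ADMIN_CRC32 the key pointer can be NULL; only init_val is used.
 */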
2123
2124 int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2125                               enum ena_admin_hash_functions *func,
2126                               u8 *key)
2127 {
2128         struct ena_rss *rss = &ena_dev->rss;
2129         struct ena_admin_get_feat_resp get_resp;
2130         struct ena_admin_feature_rss_flow_hash_control *hash_key =
2131                 rss->hash_key;
2132         int rc;
2133
2134         if (unlikely(!func))
2135                 return -EINVAL;
2136
2137         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2138                                     ENA_ADMIN_RSS_HASH_FUNCTION,
2139                                     rss->hash_key_dma_addr,
2140                                     sizeof(*rss->hash_key));
2141         if (unlikely(rc))
2142                 return rc;
2143
2144         /* ffs() returns 1 in case the lsb is set */
2145         rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2146         if (rss->hash_func)
2147                 rss->hash_func--;
2148
2149         *func = rss->hash_func;
2150
2151         if (key)
2152                 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2153
2154         return 0;
2155 }
2156
2157 int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2158                           enum ena_admin_flow_hash_proto proto,
2159                           u16 *fields)
2160 {
2161         struct ena_rss *rss = &ena_dev->rss;
2162         struct ena_admin_get_feat_resp get_resp;
2163         int rc;
2164
2165         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2166                                     ENA_ADMIN_RSS_HASH_INPUT,
2167                                     rss->hash_ctrl_dma_addr,
2168                                     sizeof(*rss->hash_ctrl));
2169         if (unlikely(rc))
2170                 return rc;
2171
2172         if (fields)
2173                 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2174
2175         return 0;
2176 }
2177
2178 int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2179 {
2180         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2181         struct ena_rss *rss = &ena_dev->rss;
2182         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2183         struct ena_admin_set_feat_cmd cmd;
2184         struct ena_admin_set_feat_resp resp;
2185         int ret;
2186
2187         if (!ena_com_check_supported_feature_id(ena_dev,
2188                                                 ENA_ADMIN_RSS_HASH_INPUT)) {
2189                 pr_debug("Feature %d isn't supported\n",
2190                          ENA_ADMIN_RSS_HASH_INPUT);
2191                 return -EOPNOTSUPP;
2192         }
2193
2194         memset(&cmd, 0x0, sizeof(cmd));
2195
2196         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2197         cmd.aq_common_descriptor.flags =
2198                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2199         cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2200         cmd.u.flow_hash_input.enabled_input_sort =
2201                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2202                 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2203
2204         ret = ena_com_mem_addr_set(ena_dev,
2205                                    &cmd.control_buffer.address,
2206                                    rss->hash_ctrl_dma_addr);
2207         if (unlikely(ret)) {
2208                 pr_err("memory address set failed\n");
2209                 return ret;
2210         }
2211         cmd.control_buffer.length = sizeof(*hash_ctrl);
2212
2213         ret = ena_com_execute_admin_command(admin_queue,
2214                                             (struct ena_admin_aq_entry *)&cmd,
2215                                             sizeof(cmd),
2216                                             (struct ena_admin_acq_entry *)&resp,
2217                                             sizeof(resp));
2218         if (unlikely(ret))
2219                 pr_err("Failed to set hash input. error: %d\n", ret);
2220
2221         return ret;
2222 }
2223
2224 int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2225 {
2226         struct ena_rss *rss = &ena_dev->rss;
2227         struct ena_admin_feature_rss_hash_control *hash_ctrl =
2228                 rss->hash_ctrl;
2229         u16 available_fields = 0;
2230         int rc, i;
2231
2232         /* Get the supported hash input */
2233         rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2234         if (unlikely(rc))
2235                 return rc;
2236
2237         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2238                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2239                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2240
2241         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2242                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2243                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2244
2245         hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2246                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2247                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2248
2249         hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2250                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2251                 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2252
2253         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2254                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2255
2256         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2257                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2258
2259         hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2260                 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2261
2262         hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2263                 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2264
2265         for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2266                 available_fields = hash_ctrl->selected_fields[i].fields &
2267                                 hash_ctrl->supported_fields[i].fields;
2268                 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2269                         pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2270                                i, hash_ctrl->supported_fields[i].fields,
2271                                hash_ctrl->selected_fields[i].fields);
2272                         return -EOPNOTSUPP;
2273                 }
2274         }
2275
2276         rc = ena_com_set_hash_ctrl(ena_dev);
2277
2278         /* In case of failure, restore the old hash ctrl */
2279         if (unlikely(rc))
2280                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2281
2282         return rc;
2283 }
2284
2285 int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2286                            enum ena_admin_flow_hash_proto proto,
2287                            u16 hash_fields)
2288 {
2289         struct ena_rss *rss = &ena_dev->rss;
2290         struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2291         u16 supported_fields;
2292         int rc;
2293
2294         if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2295                 pr_err("Invalid proto num (%u)\n", proto);
2296                 return -EINVAL;
2297         }
2298
2299         /* Get the ctrl table */
2300         rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2301         if (unlikely(rc))
2302                 return rc;
2303
2304         /* Make sure all the fields are supported */
2305         supported_fields = hash_ctrl->supported_fields[proto].fields;
2306         if ((hash_fields & supported_fields) != hash_fields) {
2307                 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2308                        proto, hash_fields, supported_fields);
2309         }
2310
2311         hash_ctrl->selected_fields[proto].fields = hash_fields;
2312
2313         rc = ena_com_set_hash_ctrl(ena_dev);
2314
2315         /* In case of failure, restore the old hash ctrl */
2316         if (unlikely(rc))
2317                 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2318
2319         return rc;
2320 }
2321
2322 int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2323                                       u16 entry_idx, u16 entry_value)
2324 {
2325         struct ena_rss *rss = &ena_dev->rss;
2326
2327         if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2328                 return -EINVAL;
2329
2330         if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
2331                 return -EINVAL;
2332
2333         rss->host_rss_ind_tbl[entry_idx] = entry_value;
2334
2335         return 0;
2336 }
2337
2338 int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2339 {
2340         struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2341         struct ena_rss *rss = &ena_dev->rss;
2342         struct ena_admin_set_feat_cmd cmd;
2343         struct ena_admin_set_feat_resp resp;
2344         int ret;
2345
2346         if (!ena_com_check_supported_feature_id(
2347                     ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2348                 pr_debug("Feature %d isn't supported\n",
2349                          ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2350                 return -EOPNOTSUPP;
2351         }
2352
2353         ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2354         if (ret) {
2355                 pr_err("Failed to convert host indirection table to device table\n");
2356                 return ret;
2357         }
2358
2359         memset(&cmd, 0x0, sizeof(cmd));
2360
2361         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2362         cmd.aq_common_descriptor.flags =
2363                 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2364         cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2365         cmd.u.ind_table.size = rss->tbl_log_size;
2366         cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2367
2368         ret = ena_com_mem_addr_set(ena_dev,
2369                                    &cmd.control_buffer.address,
2370                                    rss->rss_ind_tbl_dma_addr);
2371         if (unlikely(ret)) {
2372                 pr_err("memory address set failed\n");
2373                 return ret;
2374         }
2375
2376         cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2377                 sizeof(struct ena_admin_rss_ind_table_entry);
2378
2379         ret = ena_com_execute_admin_command(admin_queue,
2380                                             (struct ena_admin_aq_entry *)&cmd,
2381                                             sizeof(cmd),
2382                                             (struct ena_admin_acq_entry *)&resp,
2383                                             sizeof(resp));
2384
2385         if (unlikely(ret))
2386                 pr_err("Failed to set indirect table. error: %d\n", ret);
2387
2388         return ret;
2389 }
2390
2391 int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2392 {
2393         struct ena_rss *rss = &ena_dev->rss;
2394         struct ena_admin_get_feat_resp get_resp;
2395         u32 tbl_size;
2396         int i, rc;
2397
2398         tbl_size = (1ULL << rss->tbl_log_size) *
2399                 sizeof(struct ena_admin_rss_ind_table_entry);
2400
2401         rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2402                                     ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2403                                     rss->rss_ind_tbl_dma_addr,
2404                                     tbl_size);
2405         if (unlikely(rc))
2406                 return rc;
2407
2408         if (!ind_tbl)
2409                 return 0;
2410
2411         rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2412         if (unlikely(rc))
2413                 return rc;
2414
2415         for (i = 0; i < (1 << rss->tbl_log_size); i++)
2416                 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2417
2418         return 0;
2419 }
2420
2421 int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2422 {
2423         int rc;
2424
2425         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2426
2427         rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2428         if (unlikely(rc))
2429                 goto err_indr_tbl;
2430
2431         rc = ena_com_hash_key_allocate(ena_dev);
2432         if (unlikely(rc))
2433                 goto err_hash_key;
2434
2435         ena_com_hash_key_fill_default_key(ena_dev);
2436
2437         rc = ena_com_hash_ctrl_init(ena_dev);
2438         if (unlikely(rc))
2439                 goto err_hash_ctrl;
2440
2441         return 0;
2442
2443 err_hash_ctrl:
2444         ena_com_hash_key_destroy(ena_dev);
2445 err_hash_key:
2446         ena_com_indirect_table_destroy(ena_dev);
2447 err_indr_tbl:
2448
2449         return rc;
2450 }
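
/*
 * Illustrative RSS setup sketch (hedged, not part of the upstream file);
 * tbl_log_size, key_size and rx_qid() are placeholders and error handling
 * is omitted:
 *
 *	rc = ena_com_rss_init(ena_dev, tbl_log_size);
 *	for (i = 0; i < (1 << tbl_log_size); i++)
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i, rx_qid(i));
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_CRC32, NULL,
 *					key_size, 0xFFFFFFFF);
 *	rc = ena_com_set_default_hash_ctrl(ena_dev);
 *	rc = ena_com_indirect_table_set(ena_dev);
 *
 * ena_com_rss_destroy() releases everything allocated here.
 */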
2451
2452 void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2453 {
2454         ena_com_indirect_table_destroy(ena_dev);
2455         ena_com_hash_key_destroy(ena_dev);
2456         ena_com_hash_ctrl_destroy(ena_dev);
2457
2458         memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2459 }
2460
2461 int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2462 {
2463         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2464
2465         host_attr->host_info =
2466                 dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
2467                                     &host_attr->host_info_dma_addr, GFP_KERNEL);
2468         if (unlikely(!host_attr->host_info))
2469                 return -ENOMEM;
2470
2471         return 0;
2472 }
2473
2474 int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2475                                 u32 debug_area_size)
2476 {
2477         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2478
2479         host_attr->debug_area_virt_addr =
2480                 dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
2481                                     &host_attr->debug_area_dma_addr, GFP_KERNEL);
2482         if (unlikely(!host_attr->debug_area_virt_addr)) {
2483                 host_attr->debug_area_size = 0;
2484                 return -ENOMEM;
2485         }
2486
2487         host_attr->debug_area_size = debug_area_size;
2488
2489         return 0;
2490 }
2491
2492 void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2493 {
2494         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2495
2496         if (host_attr->host_info) {
2497                 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2498                                   host_attr->host_info_dma_addr);
2499                 host_attr->host_info = NULL;
2500         }
2501 }
2502
2503 void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2504 {
2505         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2506
2507         if (host_attr->debug_area_virt_addr) {
2508                 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2509                                   host_attr->debug_area_virt_addr,
2510                                   host_attr->debug_area_dma_addr);
2511                 host_attr->debug_area_virt_addr = NULL;
2512         }
2513 }
2514
2515 int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2516 {
2517         struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2518         struct ena_com_admin_queue *admin_queue;
2519         struct ena_admin_set_feat_cmd cmd;
2520         struct ena_admin_set_feat_resp resp;
2521
2522         int ret;
2523
2524         /* Host attribute config is called before ena_com_get_dev_attr_feat
2525          * so ena_com can't check if the feature is supported.
2526          */
2527
2528         memset(&cmd, 0x0, sizeof(cmd));
2529         admin_queue = &ena_dev->admin_queue;
2530
2531         cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2532         cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2533
2534         ret = ena_com_mem_addr_set(ena_dev,
2535                                    &cmd.u.host_attr.debug_ba,
2536                                    host_attr->debug_area_dma_addr);
2537         if (unlikely(ret)) {
2538                 pr_err("memory address set failed\n");
2539                 return ret;
2540         }
2541
2542         ret = ena_com_mem_addr_set(ena_dev,
2543                                    &cmd.u.host_attr.os_info_ba,
2544                                    host_attr->host_info_dma_addr);
2545         if (unlikely(ret)) {
2546                 pr_err("memory address set failed\n");
2547                 return ret;
2548         }
2549
2550         cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2551
2552         ret = ena_com_execute_admin_command(admin_queue,
2553                                             (struct ena_admin_aq_entry *)&cmd,
2554                                             sizeof(cmd),
2555                                             (struct ena_admin_acq_entry *)&resp,
2556                                             sizeof(resp));
2557
2558         if (unlikely(ret))
2559                 pr_err("Failed to set host attributes: %d\n", ret);
2560
2561         return ret;
2562 }
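
/*
 * Illustrative sketch (hedged, not part of the upstream file): host
 * attribute reporting pairs the allocation helpers above with this call:
 *
 *	rc = ena_com_allocate_host_info(ena_dev);
 *	if (rc)
 *		return;
 *	// fill ena_dev->host_attr.host_info (OS type, driver version, ...)
 *	rc = ena_com_set_host_attributes(ena_dev);
 *	if (rc)
 *		ena_com_delete_host_info(ena_dev);
 *
 * Since the feature check is skipped (see the comment above), a device may
 * answer -EOPNOTSUPP, which callers can treat as non-fatal.
 */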
2563
2564 /* Interrupt moderation */
2565 bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2566 {
2567         return ena_com_check_supported_feature_id(ena_dev,
2568                                                   ENA_ADMIN_INTERRUPT_MODERATION);
2569 }
2570
2571 int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2572                                                       u32 tx_coalesce_usecs)
2573 {
2574         if (!ena_dev->intr_delay_resolution) {
2575                 pr_err("Illegal interrupt delay granularity value\n");
2576                 return -EFAULT;
2577         }
2578
2579         ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2580                 ena_dev->intr_delay_resolution;
2581
2582         return 0;
2583 }
2584
2585 int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2586                                                       u32 rx_coalesce_usecs)
2587 {
2588         if (!ena_dev->intr_delay_resolution) {
2589                 pr_err("Illegal interrupt delay granularity value\n");
2590                 return -EFAULT;
2591         }
2592
2593         /* We use LOWEST entry of moderation table for storing
2594          * nonadaptive interrupt coalescing values
2595          */
2596         ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2597                 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2598
2599         return 0;
2600 }
2601
2602 void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2603 {
2604         if (ena_dev->intr_moder_tbl)
2605                 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2606         ena_dev->intr_moder_tbl = NULL;
2607 }
2608
2609 int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2610 {
2611         struct ena_admin_get_feat_resp get_resp;
2612         u16 delay_resolution;
2613         int rc;
2614
2615         rc = ena_com_get_feature(ena_dev, &get_resp,
2616                                  ENA_ADMIN_INTERRUPT_MODERATION);
2617
2618         if (rc) {
2619                 if (rc == -EOPNOTSUPP) {
2620                         pr_debug("Feature %d isn't supported\n",
2621                                  ENA_ADMIN_INTERRUPT_MODERATION);
2622                         rc = 0;
2623                 } else {
2624                         pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2625                                rc);
2626                 }
2627
2628                 /* no moderation supported, disable adaptive support */
2629                 ena_com_disable_adaptive_moderation(ena_dev);
2630                 return rc;
2631         }
2632
2633         rc = ena_com_init_interrupt_moderation_table(ena_dev);
2634         if (rc)
2635                 goto err;
2636
2637         /* if moderation is supported by the device, we set adaptive moderation */
2638         delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2639         ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2640         ena_com_enable_adaptive_moderation(ena_dev);
2641
2642         return 0;
2643 err:
2644         ena_com_destroy_interrupt_moderation(ena_dev);
2645         return rc;
2646 }
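
/*
 * Illustrative sketch (hedged, not part of the upstream file): after a
 * successful init, the defaults and the non-adaptive intervals can be
 * programmed like this (tx_usecs and rx_usecs are placeholders):
 *
 *	ena_com_config_default_interrupt_moderation_table(ena_dev);
 *	ena_com_update_nonadaptive_moderation_interval_tx(ena_dev, tx_usecs);
 *	ena_com_update_nonadaptive_moderation_interval_rx(ena_dev, rx_usecs);
 *
 * Both update helpers divide the microsecond value by
 * ena_dev->intr_delay_resolution, which is taken from the device's
 * interrupt moderation feature response during init.
 */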
2647
2648 void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2649 {
2650         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2651
2652         if (!intr_moder_tbl)
2653                 return;
2654
2655         intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2656                 ENA_INTR_LOWEST_USECS;
2657         intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2658                 ENA_INTR_LOWEST_PKTS;
2659         intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2660                 ENA_INTR_LOWEST_BYTES;
2661
2662         intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2663                 ENA_INTR_LOW_USECS;
2664         intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2665                 ENA_INTR_LOW_PKTS;
2666         intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2667                 ENA_INTR_LOW_BYTES;
2668
2669         intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2670                 ENA_INTR_MID_USECS;
2671         intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2672                 ENA_INTR_MID_PKTS;
2673         intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2674                 ENA_INTR_MID_BYTES;
2675
2676         intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2677                 ENA_INTR_HIGH_USECS;
2678         intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2679                 ENA_INTR_HIGH_PKTS;
2680         intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2681                 ENA_INTR_HIGH_BYTES;
2682
2683         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2684                 ENA_INTR_HIGHEST_USECS;
2685         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2686                 ENA_INTR_HIGHEST_PKTS;
2687         intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2688                 ENA_INTR_HIGHEST_BYTES;
2689 }
2690
2691 unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2692 {
2693         return ena_dev->intr_moder_tx_interval;
2694 }
2695
2696 unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2697 {
2698         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2699
2700         if (intr_moder_tbl)
2701                 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2702
2703         return 0;
2704 }
2705
2706 void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2707                                         enum ena_intr_moder_level level,
2708                                         struct ena_intr_moder_entry *entry)
2709 {
2710         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2711
2712         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2713                 return;
2714
2715         intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2716         if (ena_dev->intr_delay_resolution)
2717                 intr_moder_tbl[level].intr_moder_interval /=
2718                         ena_dev->intr_delay_resolution;
2719         intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
2720
2721         /* use hardcoded value until ethtool supports bytecount parameter */
2722         if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
2723                 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
2724 }
2725
2726 void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2727                                        enum ena_intr_moder_level level,
2728                                        struct ena_intr_moder_entry *entry)
2729 {
2730         struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2731
2732         if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2733                 return;
2734
2735         entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2736         if (ena_dev->intr_delay_resolution)
2737                 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2738         entry->pkts_per_interval =
2739                 intr_moder_tbl[level].pkts_per_interval;
2740         entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2741 }