/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/semaphore.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
#include <asm/byteorder.h>

#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"

#define WQS_BLOCKS_PER_PAGE             4

#define WQ_BLOCK_SIZE                   4096
#define WQS_PAGE_SIZE                   (WQS_BLOCKS_PER_PAGE * WQ_BLOCK_SIZE)

#define WQS_MAX_NUM_BLOCKS              128
#define WQS_FREE_BLOCKS_SIZE(wqs)       (WQS_MAX_NUM_BLOCKS * \
                                         sizeof((wqs)->free_blocks[0]))

#define WQ_SIZE(wq)                     ((wq)->q_depth * (wq)->wqebb_size)

#define WQ_PAGE_ADDR_SIZE               sizeof(u64)
#define WQ_MAX_PAGES                    (WQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)

#define CMDQ_BLOCK_SIZE                 512
#define CMDQ_PAGE_SIZE                  4096

#define CMDQ_WQ_MAX_PAGES               (CMDQ_BLOCK_SIZE / WQ_PAGE_ADDR_SIZE)

#define WQ_BASE_VADDR(wqs, wq)          \
                        ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_PADDR(wqs, wq)          \
                        ((wqs)->page_paddr[(wq)->page_idx] \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define WQ_BASE_ADDR(wqs, wq)           \
                        ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
                                + (wq)->block_idx * WQ_BLOCK_SIZE)

#define CMDQ_BASE_VADDR(cmdq_pages, wq) \
                        ((void *)((cmdq_pages)->page_vaddr) \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_PADDR(cmdq_pages, wq) \
                        ((cmdq_pages)->page_paddr \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define CMDQ_BASE_ADDR(cmdq_pages, wq)  \
                        ((void *)((cmdq_pages)->shadow_page_vaddr) \
                                + (wq)->block_idx * CMDQ_BLOCK_SIZE)

#define WQE_PAGE_OFF(wq, idx)   (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \
                                        (wq)->wqebb_size)

#define WQE_PAGE_NUM(wq, idx)   (((idx) / ((wq)->num_wqebbs_per_page)) \
                                        & ((wq)->num_q_pages - 1))

#define WQ_PAGE_ADDR(wq, idx)           \
                        ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])

#define MASKED_WQE_IDX(wq, idx)         ((idx) & (wq)->mask)

#define WQE_IN_RANGE(wqe, start, end)   \
                (((unsigned long)(wqe) >= (unsigned long)(start)) && \
                 ((unsigned long)(wqe) < (unsigned long)(end)))

#define WQE_SHADOW_PAGE(wq, wqe)        \
                (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
                        / (wq)->max_wqe_size)

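/*
 * Worked example of the indexing macros above (illustrative numbers only,
 * not taken from the driver): with wqebb_size = 64, num_wqebbs_per_page = 64
 * and num_q_pages = 4, WQEBB index 70 lands in queue page
 * WQE_PAGE_NUM = (70 / 64) & 3 = 1, at byte offset
 * WQE_PAGE_OFF = (70 & 63) * 64 = 384 within that page. Both macros rely on
 * num_wqebbs_per_page and num_q_pages being powers of 2, which the
 * allocation paths below check explicitly.
 */
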
/**
 * queue_alloc_page - allocate a DMA page for a queue
 * @hwif: HW interface for allocating DMA memory
 * @vaddr: returned virtual address of the page
 * @paddr: returned physical address of the page
 * @shadow_vaddr: returned VM area that holds the WQ page addresses
 * @page_sz: page size of each WQ page
 *
 * Return 0 - Success, negative - Failure
 **/
static int queue_alloc_page(struct hinic_hwif *hwif, u64 **vaddr, u64 *paddr,
                            void ***shadow_vaddr, size_t page_sz)
{
        struct pci_dev *pdev = hwif->pdev;
        dma_addr_t dma_addr;

        *vaddr = dma_zalloc_coherent(&pdev->dev, page_sz, &dma_addr,
                                     GFP_KERNEL);
        if (!*vaddr) {
                dev_err(&pdev->dev, "Failed to allocate dma for wqs page\n");
                return -ENOMEM;
        }

        *paddr = (u64)dma_addr;

        /* use vzalloc because the shadow area may be large */
        *shadow_vaddr = vzalloc(page_sz);
        if (!*shadow_vaddr)
                goto err_shadow_vaddr;

        return 0;

err_shadow_vaddr:
        dma_free_coherent(&pdev->dev, page_sz, *vaddr, dma_addr);
        return -ENOMEM;
}

/**
 * wqs_allocate_page - allocate page for WQ set
 * @wqs: Work Queue Set
 * @page_idx: index of the page to allocate
 *
 * Return 0 - Success, negative - Failure
 **/
static int wqs_allocate_page(struct hinic_wqs *wqs, int page_idx)
{
        return queue_alloc_page(wqs->hwif, &wqs->page_vaddr[page_idx],
                                &wqs->page_paddr[page_idx],
                                &wqs->shadow_page_vaddr[page_idx],
                                WQS_PAGE_SIZE);
}

/**
 * wqs_free_page - free page of WQ set
 * @wqs: Work Queue Set
 * @page_idx: index of the page to free
 **/
static void wqs_free_page(struct hinic_wqs *wqs, int page_idx)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;

        dma_free_coherent(&pdev->dev, WQS_PAGE_SIZE,
                          wqs->page_vaddr[page_idx],
                          (dma_addr_t)wqs->page_paddr[page_idx]);
        vfree(wqs->shadow_page_vaddr[page_idx]);
}

/**
 * cmdq_allocate_page - allocate page for cmdq
 * @cmdq_pages: the cmdq pages struct that will hold the allocated page
 *
 * Return 0 - Success, negative - Failure
 **/
static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
{
        return queue_alloc_page(cmdq_pages->hwif, &cmdq_pages->page_vaddr,
                                &cmdq_pages->page_paddr,
                                &cmdq_pages->shadow_page_vaddr,
                                CMDQ_PAGE_SIZE);
}

/**
 * cmdq_free_page - free page from cmdq
 * @cmdq_pages: the cmdq pages struct that holds the page
 **/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
        struct hinic_hwif *hwif = cmdq_pages->hwif;
        struct pci_dev *pdev = hwif->pdev;

        dma_free_coherent(&pdev->dev, CMDQ_PAGE_SIZE,
                          cmdq_pages->page_vaddr,
                          (dma_addr_t)cmdq_pages->page_paddr);
        vfree(cmdq_pages->shadow_page_vaddr);
}

static int alloc_page_arrays(struct hinic_wqs *wqs)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        size_t size;

        size = wqs->num_pages * sizeof(*wqs->page_paddr);
        wqs->page_paddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wqs->page_paddr)
                return -ENOMEM;

        size = wqs->num_pages * sizeof(*wqs->page_vaddr);
        wqs->page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wqs->page_vaddr)
                goto err_page_vaddr;

        size = wqs->num_pages * sizeof(*wqs->shadow_page_vaddr);
        wqs->shadow_page_vaddr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wqs->shadow_page_vaddr)
                goto err_page_shadow_vaddr;

        return 0;

err_page_shadow_vaddr:
        devm_kfree(&pdev->dev, wqs->page_vaddr);

err_page_vaddr:
        devm_kfree(&pdev->dev, wqs->page_paddr);
        return -ENOMEM;
}

static void free_page_arrays(struct hinic_wqs *wqs)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;

        devm_kfree(&pdev->dev, wqs->shadow_page_vaddr);
        devm_kfree(&pdev->dev, wqs->page_vaddr);
        devm_kfree(&pdev->dev, wqs->page_paddr);
}

static int wqs_next_block(struct hinic_wqs *wqs, int *page_idx,
                          int *block_idx)
{
        int pos;

        down(&wqs->alloc_blocks_lock);

        wqs->num_free_blks--;

        if (wqs->num_free_blks < 0) {
                wqs->num_free_blks++;
                up(&wqs->alloc_blocks_lock);
                return -ENOMEM;
        }

        pos = wqs->alloc_blk_pos++;
        pos &= WQS_MAX_NUM_BLOCKS - 1;

        *page_idx = wqs->free_blocks[pos].page_idx;
        *block_idx = wqs->free_blocks[pos].block_idx;

        wqs->free_blocks[pos].page_idx = -1;
        wqs->free_blocks[pos].block_idx = -1;

        up(&wqs->alloc_blocks_lock);
        return 0;
}

static void wqs_return_block(struct hinic_wqs *wqs, int page_idx,
                             int block_idx)
{
        int pos;

        down(&wqs->alloc_blocks_lock);

        pos = wqs->return_blk_pos++;
        pos &= WQS_MAX_NUM_BLOCKS - 1;

        wqs->free_blocks[pos].page_idx = page_idx;
        wqs->free_blocks[pos].block_idx = block_idx;

        wqs->num_free_blks++;

        up(&wqs->alloc_blocks_lock);
}

static void init_wqs_blocks_arr(struct hinic_wqs *wqs)
{
        int page_idx, blk_idx, pos = 0;

        for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
                for (blk_idx = 0; blk_idx < WQS_BLOCKS_PER_PAGE; blk_idx++) {
                        wqs->free_blocks[pos].page_idx = page_idx;
                        wqs->free_blocks[pos].block_idx = blk_idx;
                        pos++;
                }
        }

        wqs->alloc_blk_pos = 0;
        wqs->return_blk_pos = pos;
        wqs->num_free_blks = pos;

        sema_init(&wqs->alloc_blocks_lock, 1);
}

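/*
 * The free-blocks array above behaves as a circular FIFO of capacity
 * WQS_MAX_NUM_BLOCKS: wqs_next_block() consumes entries at alloc_blk_pos and
 * wqs_return_block() appends at return_blk_pos, both wrapping via
 * "pos &= WQS_MAX_NUM_BLOCKS - 1". For example (illustrative numbers only),
 * with 2 pages the array is seeded with entries (0,0)..(0,3),(1,0)..(1,3),
 * alloc_blk_pos = 0 and return_blk_pos = num_free_blks = 8; the first
 * allocation hands out (page 0, block 0) and a later return is written at
 * slot 8 & 127 = 8.
 */
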
/**
 * hinic_wqs_alloc - allocate Work Queues set
 * @wqs: Work Queue Set
 * @max_wqs: maximum number of WQs to allocate
 * @hwif: HW interface to use for the allocation
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_alloc(struct hinic_wqs *wqs, int max_wqs,
                    struct hinic_hwif *hwif)
{
        struct pci_dev *pdev = hwif->pdev;
        int err, i, page_idx;

        max_wqs = ALIGN(max_wqs, WQS_BLOCKS_PER_PAGE);
        if (max_wqs > WQS_MAX_NUM_BLOCKS) {
                dev_err(&pdev->dev, "Invalid max_wqs = %d\n", max_wqs);
                return -EINVAL;
        }

        wqs->hwif = hwif;
        wqs->num_pages = max_wqs / WQS_BLOCKS_PER_PAGE;

        if (alloc_page_arrays(wqs)) {
                dev_err(&pdev->dev,
                        "Failed to allocate mem for page addresses\n");
                return -ENOMEM;
        }

        for (page_idx = 0; page_idx < wqs->num_pages; page_idx++) {
                err = wqs_allocate_page(wqs, page_idx);
                if (err) {
                        dev_err(&pdev->dev, "Failed wq page allocation\n");
                        goto err_wq_allocate_page;
                }
        }

        wqs->free_blocks = devm_kzalloc(&pdev->dev, WQS_FREE_BLOCKS_SIZE(wqs),
                                        GFP_KERNEL);
        if (!wqs->free_blocks) {
                err = -ENOMEM;
                goto err_alloc_blocks;
        }

        init_wqs_blocks_arr(wqs);
        return 0;

err_alloc_blocks:
err_wq_allocate_page:
        for (i = 0; i < page_idx; i++)
                wqs_free_page(wqs, i);

        free_page_arrays(wqs);
        return err;
}

/**
 * hinic_wqs_free - free Work Queues set
 * @wqs: Work Queue Set
 **/
void hinic_wqs_free(struct hinic_wqs *wqs)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        int page_idx;

        devm_kfree(&pdev->dev, wqs->free_blocks);

        for (page_idx = 0; page_idx < wqs->num_pages; page_idx++)
                wqs_free_page(wqs, page_idx);

        free_page_arrays(wqs);
}

/**
 * alloc_wqes_shadow - allocate WQE shadows for WQ
 * @wq: WQ to allocate shadows for
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wqes_shadow(struct hinic_wq *wq)
{
        struct hinic_hwif *hwif = wq->hwif;
        struct pci_dev *pdev = hwif->pdev;
        size_t size;

        size = wq->num_q_pages * wq->max_wqe_size;
        wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wq->shadow_wqe)
                return -ENOMEM;

        size = wq->num_q_pages * sizeof(wq->prod_idx);
        wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
        if (!wq->shadow_idx)
                goto err_shadow_idx;

        return 0;

err_shadow_idx:
        devm_kfree(&pdev->dev, wq->shadow_wqe);
        return -ENOMEM;
}

/**
 * free_wqes_shadow - free WQE shadows of WQ
 * @wq: WQ to free shadows from
 **/
static void free_wqes_shadow(struct hinic_wq *wq)
{
        struct hinic_hwif *hwif = wq->hwif;
        struct pci_dev *pdev = hwif->pdev;

        devm_kfree(&pdev->dev, wq->shadow_idx);
        devm_kfree(&pdev->dev, wq->shadow_wqe);
}

/**
 * free_wq_pages - free pages of WQ
 * @wq: WQ to free pages from
 * @hwif: HW interface for releasing dma addresses
 * @num_q_pages: number of pages to free
 **/
static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
                          int num_q_pages)
{
        struct pci_dev *pdev = hwif->pdev;
        int i;

        for (i = 0; i < num_q_pages; i++) {
                void **vaddr = &wq->shadow_block_vaddr[i];
                u64 *paddr = &wq->block_vaddr[i];
                dma_addr_t dma_addr;

                /* block_vaddr holds the page DMA addresses in Big Endian */
                dma_addr = (dma_addr_t)be64_to_cpu(*paddr);
                dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr,
                                  dma_addr);
        }

        free_wqes_shadow(wq);
}

/**
 * alloc_wq_pages - alloc pages for WQ
 * @wq: WQ to allocate pages for
 * @hwif: HW interface for allocating dma addresses
 * @max_pages: maximum number of pages allowed
 *
 * Return 0 - Success, negative - Failure
 **/
static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif,
                          int max_pages)
{
        struct pci_dev *pdev = hwif->pdev;
        int i, err, num_q_pages;

        num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size;
        if (num_q_pages > max_pages) {
                dev_err(&pdev->dev, "Number of wq pages exceeds the limit\n");
                return -EINVAL;
        }

        if (num_q_pages & (num_q_pages - 1)) {
                dev_err(&pdev->dev, "Number of wq pages must be power of 2\n");
                return -EINVAL;
        }

        wq->num_q_pages = num_q_pages;

        err = alloc_wqes_shadow(wq);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate wqe shadow\n");
                return err;
        }

        for (i = 0; i < num_q_pages; i++) {
                void **vaddr = &wq->shadow_block_vaddr[i];
                u64 *paddr = &wq->block_vaddr[i];
                dma_addr_t dma_addr;

                *vaddr = dma_zalloc_coherent(&pdev->dev, wq->wq_page_size,
                                             &dma_addr, GFP_KERNEL);
                if (!*vaddr) {
                        dev_err(&pdev->dev, "Failed to allocate wq page\n");
                        goto err_alloc_wq_pages;
                }

                /* HW uses Big Endian Format */
                *paddr = cpu_to_be64(dma_addr);
        }

        return 0;

err_alloc_wq_pages:
        free_wq_pages(wq, hwif, i);
        return -ENOMEM;
}

/**
 * hinic_wq_allocate - Allocate the WQ resources from the WQS
 * @wqs: WQ set from which to allocate the WQ resources
 * @wq: WQ to allocate resources for, from the WQ set
 * @wqebb_size: Work Queue Element Basic Block size in bytes
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq,
                      u16 wqebb_size, u16 wq_page_size, u16 q_depth,
                      u16 max_wqe_size)
{
        struct hinic_hwif *hwif = wqs->hwif;
        struct pci_dev *pdev = hwif->pdev;
        u16 num_wqebbs_per_page;
        int err;

        if (wqebb_size == 0) {
                dev_err(&pdev->dev, "wqebb_size must be > 0\n");
                return -EINVAL;
        }

        if (wq_page_size == 0) {
                dev_err(&pdev->dev, "wq_page_size must be > 0\n");
                return -EINVAL;
        }

        if (q_depth & (q_depth - 1)) {
                dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
                return -EINVAL;
        }

        num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;

        if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
                dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
                return -EINVAL;
        }

        wq->hwif = hwif;

        err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx);
        if (err) {
                dev_err(&pdev->dev, "Failed to get free wqs next block\n");
                return err;
        }

        wq->wqebb_size = wqebb_size;
        wq->wq_page_size = wq_page_size;
        wq->q_depth = q_depth;
        wq->max_wqe_size = max_wqe_size;
        wq->num_wqebbs_per_page = num_wqebbs_per_page;

        wq->block_vaddr = WQ_BASE_VADDR(wqs, wq);
        wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq);
        wq->block_paddr = WQ_BASE_PADDR(wqs, wq);

        err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate wq pages\n");
                goto err_alloc_wq_pages;
        }

        atomic_set(&wq->cons_idx, 0);
        atomic_set(&wq->prod_idx, 0);
        atomic_set(&wq->delta, q_depth);
        wq->mask = q_depth - 1;

        return 0;

err_alloc_wq_pages:
        wqs_return_block(wqs, wq->page_idx, wq->block_idx);
        return err;
}

/**
 * hinic_wq_free - Free the WQ resources back to the WQS
 * @wqs: WQ set to return the resources to
 * @wq: WQ whose resources are returned to the WQ set
 **/
void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq)
{
        free_wq_pages(wq, wqs->hwif, wq->num_q_pages);

        wqs_return_block(wqs, wq->page_idx, wq->block_idx);
}

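/*
 * Typical lifecycle of the WQS/WQ APIs above (minimal illustrative sketch,
 * not driver code; "hwif" comes from the caller and the sizes are arbitrary
 * but satisfy the power-of-2 checks in hinic_wq_allocate()):
 *
 *      struct hinic_wqs wqs;
 *      struct hinic_wq wq;
 *      int err;
 *
 *      err = hinic_wqs_alloc(&wqs, 16, hwif);  // room for 16 WQ blocks
 *      if (err)
 *              return err;
 *
 *      // wqebb_size = 64, wq_page_size = 4096, q_depth = 256, max_wqe = 128
 *      err = hinic_wq_allocate(&wqs, &wq, 64, 4096, 256, 128);
 *      if (err)
 *              goto err_free_wqs;
 *
 *      // ... use hinic_get_wqe()/hinic_put_wqe() on &wq (see below) ...
 *
 *      hinic_wq_free(&wqs, &wq);
 * err_free_wqs:
 *      hinic_wqs_free(&wqs);
 *      return err;
 */
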
/**
 * hinic_wqs_cmdq_alloc - Allocate wqs for cmdqs
 * @cmdq_pages: will hold the pages of the cmdq
 * @wq: returned wqs
 * @hwif: HW interface
 * @cmdq_blocks: number of cmdq blocks/wq to allocate
 * @wqebb_size: Work Queue Element Basic Block size in bytes
 * @wq_page_size: the page size in the Work Queue
 * @q_depth: number of wqebbs in WQ
 * @max_wqe_size: maximum WQE size that will be used in the WQ
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages,
                         struct hinic_wq *wq, struct hinic_hwif *hwif,
                         int cmdq_blocks, u16 wqebb_size, u16 wq_page_size,
                         u16 q_depth, u16 max_wqe_size)
{
        struct pci_dev *pdev = hwif->pdev;
        u16 num_wqebbs_per_page;
        int i, j, err = -ENOMEM;

        if (wqebb_size == 0) {
                dev_err(&pdev->dev, "wqebb_size must be > 0\n");
                return -EINVAL;
        }

        if (wq_page_size == 0) {
                dev_err(&pdev->dev, "wq_page_size must be > 0\n");
                return -EINVAL;
        }

        if (q_depth & (q_depth - 1)) {
                dev_err(&pdev->dev, "WQ q_depth must be power of 2\n");
                return -EINVAL;
        }

        num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size;

        if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) {
                dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n");
                return -EINVAL;
        }

        cmdq_pages->hwif = hwif;

        err = cmdq_allocate_page(cmdq_pages);
        if (err) {
                dev_err(&pdev->dev, "Failed to allocate CMDQ page\n");
                return err;
        }

        for (i = 0; i < cmdq_blocks; i++) {
                wq[i].hwif = hwif;
                wq[i].page_idx = 0;
                wq[i].block_idx = i;

                wq[i].wqebb_size = wqebb_size;
                wq[i].wq_page_size = wq_page_size;
                wq[i].q_depth = q_depth;
                wq[i].max_wqe_size = max_wqe_size;
                wq[i].num_wqebbs_per_page = num_wqebbs_per_page;

                wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]);
                wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]);
                wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]);

                err = alloc_wq_pages(&wq[i], cmdq_pages->hwif,
                                     CMDQ_WQ_MAX_PAGES);
                if (err) {
                        dev_err(&pdev->dev, "Failed to alloc CMDQ blocks\n");
                        goto err_cmdq_block;
                }

                atomic_set(&wq[i].cons_idx, 0);
                atomic_set(&wq[i].prod_idx, 0);
                atomic_set(&wq[i].delta, q_depth);
                wq[i].mask = q_depth - 1;
        }

        return 0;

err_cmdq_block:
        for (j = 0; j < i; j++)
                free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages);

        cmdq_free_page(cmdq_pages);
        return err;
}

/**
 * hinic_wqs_cmdq_free - Free wqs from cmdqs
 * @cmdq_pages: hold the pages of the cmdq
 * @wq: wqs to free
 * @cmdq_blocks: number of wqs to free
 **/
void hinic_wqs_cmdq_free(struct hinic_cmdq_pages *cmdq_pages,
                         struct hinic_wq *wq, int cmdq_blocks)
{
        int i;

        for (i = 0; i < cmdq_blocks; i++)
                free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages);

        cmdq_free_page(cmdq_pages);
}

static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr,
                               int num_wqebbs, u16 idx)
{
        void *wqebb_addr;
        int i;

        for (i = 0; i < num_wqebbs; i++, idx++) {
                idx = MASKED_WQE_IDX(wq, idx);
                wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
                             WQE_PAGE_OFF(wq, idx);

                memcpy(shadow_addr, wqebb_addr, wq->wqebb_size);

                shadow_addr += wq->wqebb_size;
        }
}

static void copy_wqe_from_shadow(struct hinic_wq *wq, void *shadow_addr,
                                 int num_wqebbs, u16 idx)
{
        void *wqebb_addr;
        int i;

        for (i = 0; i < num_wqebbs; i++, idx++) {
                idx = MASKED_WQE_IDX(wq, idx);
                wqebb_addr = WQ_PAGE_ADDR(wq, idx) +
                             WQE_PAGE_OFF(wq, idx);

                memcpy(wqebb_addr, shadow_addr, wq->wqebb_size);
                shadow_addr += wq->wqebb_size;
        }
}

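/*
 * Shadow WQEs exist because a WQE that spans two queue pages is not
 * contiguous in memory. For such a WQE, copy_wqe_to_shadow() gathers the
 * WQEBBs into one flat per-page buffer so callers can read or build the WQE
 * linearly, and copy_wqe_from_shadow() scatters the result back. Example
 * (illustrative numbers): with 64-byte WQEBBs and 64 WQEBBs per page, a
 * 2-WQEBB WQE starting at index 63 occupies the last WQEBB of one page and
 * the first WQEBB of the next, so it must go through the shadow buffer.
 */
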
/**
 * hinic_get_wqe - get a wqe ptr at the current pi and update the pi
 * @wq: wq to get wqe from
 * @wqe_size: wqe size
 * @prod_idx: returned pi
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
                                   u16 *prod_idx)
{
        int curr_pg, end_pg, num_wqebbs;
        u16 curr_prod_idx, end_prod_idx;

        *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx));

        num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;

        if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
                atomic_add(num_wqebbs, &wq->delta);
                return ERR_PTR(-EBUSY);
        }

        end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx);

        end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx);
        curr_prod_idx = end_prod_idx - num_wqebbs;
        curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx);

        /* end prod index points to the next wqebb, therefore minus 1 */
        end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1);

        curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx);
        end_pg = WQE_PAGE_NUM(wq, end_prod_idx);

        *prod_idx = curr_prod_idx;

        if (curr_pg != end_pg) {
                void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

                copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx);

                wq->shadow_idx[curr_pg] = *prod_idx;
                return shadow_addr;
        }

        return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
}

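/*
 * Producer-side usage sketch (illustrative only; the real callers live in
 * the SQ/RQ and cmdq code and also notify the hardware, which is outside
 * this file):
 *
 *      u16 prod_idx;
 *      struct hinic_hw_wqe *wqe;
 *
 *      wqe = hinic_get_wqe(wq, wqe_size, &prod_idx);
 *      if (IS_ERR(wqe))
 *              return PTR_ERR(wqe);            // queue full
 *
 *      // ... fill the WQE fields ...
 *
 *      hinic_write_wqe(wq, wqe, wqe_size);     // flush if it was shadowed
 */
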
/**
 * hinic_put_wqe - release the wqe space so it can be reused for a new wqe
 * @wq: wq to return wqe to
 * @wqe_size: wqe size
 **/
void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size)
{
        int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;

        atomic_add(num_wqebbs, &wq->cons_idx);

        atomic_add(num_wqebbs, &wq->delta);
}

/**
 * hinic_read_wqe - read wqe ptr at the current ci
 * @wq: wq to read from
 * @wqe_size: wqe size
 * @cons_idx: returned ci
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
                                    u16 *cons_idx)
{
        int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
        u16 curr_cons_idx, end_cons_idx;
        int curr_pg, end_pg;

        if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth)
                return ERR_PTR(-EBUSY);

        curr_cons_idx = atomic_read(&wq->cons_idx);

        curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx);
        end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1);

        curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx);
        end_pg = WQE_PAGE_NUM(wq, end_cons_idx);

        *cons_idx = curr_cons_idx;

        if (curr_pg != end_pg) {
                void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

                copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx);
                return shadow_addr;
        }

        return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx);
}

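/*
 * Consumer-side usage sketch (illustrative only; the completion handling
 * and index bookkeeping of real callers is omitted):
 *
 *      u16 cons_idx;
 *      struct hinic_hw_wqe *wqe;
 *
 *      wqe = hinic_read_wqe(wq, wqe_size, &cons_idx);
 *      if (IS_ERR(wqe))
 *              return;                         // nothing to consume
 *
 *      // ... process the completed WQE ...
 *
 *      hinic_put_wqe(wq, wqe_size);            // release the WQEBBs
 */
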
/**
 * hinic_read_wqe_direct - read wqe directly from the ci position
 * @wq: wq to read from
 * @cons_idx: ci position
 *
 * Return wqe pointer
 **/
struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx)
{
        return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx);
}

/**
 * wqe_shadow - check whether a wqe is a shadow wqe
 * @wq: wq of the wqe
 * @wqe: the wqe to check
 *
 * Return true - shadow, false - not shadow
 **/
static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe)
{
        size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size;

        return WQE_IN_RANGE(wqe, wq->shadow_wqe,
                            &wq->shadow_wqe[wqe_shadow_size]);
}

/**
 * hinic_write_wqe - write the wqe to the wq
 * @wq: wq to write wqe to
 * @wqe: wqe to write
 * @wqe_size: wqe size
 **/
void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe,
                     unsigned int wqe_size)
{
        int curr_pg, num_wqebbs;
        void *shadow_addr;
        u16 prod_idx;

        if (wqe_shadow(wq, wqe)) {
                curr_pg = WQE_SHADOW_PAGE(wq, wqe);

                prod_idx = wq->shadow_idx[curr_pg];
                num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
                shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size];

                /* scatter the shadow copy back into the queue pages */
                copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx);
        }
}