GNU Linux-libre 4.9.337-gnu1
[releases.git] / include / linux / qed / qed_chain.h
1 /* QLogic qed NIC Driver
2  * Copyright (c) 2015 QLogic Corporation
3  *
4  * This software is available under the terms of the GNU General Public License
5  * (GPL) Version 2, available from the file COPYING in the main directory of
6  * this source tree.
7  */
8
9 #ifndef _QED_CHAIN_H
10 #define _QED_CHAIN_H
11
12 #include <linux/types.h>
13 #include <asm/byteorder.h>
14 #include <linux/kernel.h>
15 #include <linux/list.h>
16 #include <linux/slab.h>
17 #include <linux/qed/common_hsi.h>
18
/* Memory layout of the chain's pages */
enum qed_chain_mode {
	/* Each Page contains a next pointer at its end */
	QED_CHAIN_MODE_NEXT_PTR,

	/* Chain is a single page; a next pointer is not required */
	QED_CHAIN_MODE_SINGLE,

	/* Page pointers are located in a side list */
	QED_CHAIN_MODE_PBL,
};
29
/* Which side(s) of the chain the driver drives */
enum qed_chain_use_mode {
	QED_CHAIN_USE_TO_PRODUCE,		/* Chain starts empty */
	QED_CHAIN_USE_TO_CONSUME,		/* Chain starts full */
	QED_CHAIN_USE_TO_CONSUME_PRODUCE,	/* Chain starts empty */
};
35
/* Width of the chain's prod/cons counters; selects chain16 vs chain32 */
enum qed_chain_cnt_type {
	/* The chain's size/prod/cons are kept in 16-bit variables */
	QED_CHAIN_CNT_TYPE_U16,

	/* The chain's size/prod/cons are kept in 32-bit variables  */
	QED_CHAIN_CNT_TYPE_U32,
};
43
/* Next-pointer element stored at the tail of each page in NEXT_PTR mode;
 * holds both the DMA and virtual address of the following page.
 */
struct qed_chain_next {
	struct regpair	next_phys;
	void		*next_virt;
};
48
/* Current producer/consumer page indices for 16-bit counter chains */
struct qed_chain_pbl_u16 {
	u16 prod_page_idx;
	u16 cons_page_idx;
};
53
/* Current producer/consumer page indices for 32-bit counter chains */
struct qed_chain_pbl_u32 {
	u32 prod_page_idx;
	u32 cons_page_idx;
};
58
/* Side-list (PBL) bookkeeping used when mode == QED_CHAIN_MODE_PBL */
struct qed_chain_pbl {
	/* Base address of a pre-allocated buffer for pbl */
	dma_addr_t	p_phys_table;
	void		*p_virt_table;

	/* Table for keeping the virtual addresses of the chain pages,
	 * respectively to the physical addresses in the pbl table.
	 */
	void **pp_virt_addr_tbl;

	/* Index to current used page by producer/consumer */
	union {
		struct qed_chain_pbl_u16 pbl16;
		struct qed_chain_pbl_u32 pbl32;
	} u;
};
75
struct qed_chain_u16 {
	/* Cyclic index of next element to produce/consume */
	u16 prod_idx;
	u16 cons_idx;
};
81
struct qed_chain_u32 {
	/* Cyclic index of next element to produce/consume */
	u32 prod_idx;
	u32 cons_idx;
};
87
/* Generic cyclic element ring; supports all three page layouts from
 * enum qed_chain_mode and both counter widths from enum qed_chain_cnt_type.
 */
struct qed_chain {
	void			*p_virt_addr;	/* first chain page (virtual) */
	dma_addr_t		p_phys_addr;	/* first chain page (DMA) */
	void			*p_prod_elem;	/* next element to produce */
	void			*p_cons_elem;	/* next element to consume */

	enum qed_chain_mode	mode;
	enum qed_chain_use_mode intended_use; /* used to produce/consume */
	enum qed_chain_cnt_type cnt_type;

	/* Counter-width dependent prod/cons indices; pick via cnt_type */
	union {
		struct qed_chain_u16 chain16;
		struct qed_chain_u32 chain32;
	} u;

	u32 page_cnt;

	/* Number of elements - capacity is for usable elements only,
	 * while size will contain total number of elements [for entire chain].
	 */
	u32 capacity;
	u32 size;

	/* Elements information for fast calculations */
	u16			elem_per_page;
	u16			elem_per_page_mask;
	u16			elem_unusable;
	u16			usable_per_page;
	u16			elem_size;
	u16			next_page_mask;
	struct qed_chain_pbl	pbl;
};
120
#define QED_CHAIN_PBL_ENTRY_SIZE        (8)
#define QED_CHAIN_PAGE_SIZE             (0x1000)
#define ELEMS_PER_PAGE(elem_size)       (QED_CHAIN_PAGE_SIZE / (elem_size))

/* In NEXT_PTR mode the tail of every page is consumed by a
 * struct qed_chain_next; that costs however many elements are needed to
 * cover its size. Other modes lose no elements. Note: 'mode' is now
 * parenthesized to keep the macro safe for compound arguments.
 */
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)     \
	(((mode) == QED_CHAIN_MODE_NEXT_PTR) ?       \
	 (1 + ((sizeof(struct qed_chain_next) - 1) / \
	       (elem_size))) : 0)

/* Real (driver-visible) elements per page */
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
	((u32)(ELEMS_PER_PAGE(elem_size) -     \
	       UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

/* Pages needed to hold elem_cnt usable elements */
#define QED_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \
	DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))

#define is_chain_u16(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == QED_CHAIN_CNT_TYPE_U32)
139
/* Accessors */

/* Returns the current producer index (16-bit counter chains only) */
static inline u16 qed_chain_get_prod_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.prod_idx;
}
145
/* Returns the current consumer index (16-bit counter chains only) */
static inline u16 qed_chain_get_cons_idx(struct qed_chain *p_chain)
{
	return p_chain->u.chain16.cons_idx;
}
150
/* Returns the current consumer index (32-bit counter chains only) */
static inline u32 qed_chain_get_cons_idx_u32(struct qed_chain *p_chain)
{
	return p_chain->u.chain32.cons_idx;
}
155
156 static inline u16 qed_chain_get_elem_left(struct qed_chain *p_chain)
157 {
158         u16 elem_per_page = p_chain->elem_per_page;
159         u32 prod = p_chain->u.chain16.prod_idx;
160         u32 cons = p_chain->u.chain16.cons_idx;
161         u16 used;
162
163         if (prod < cons)
164                 prod += (u32)U16_MAX + 1;
165
166         used = (u16)(prod - cons);
167         if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
168                 used -= prod / elem_per_page - cons / elem_per_page;
169
170         return (u16)(p_chain->capacity - used);
171 }
172
173 static inline u32 qed_chain_get_elem_left_u32(struct qed_chain *p_chain)
174 {
175         u16 elem_per_page = p_chain->elem_per_page;
176         u64 prod = p_chain->u.chain32.prod_idx;
177         u64 cons = p_chain->u.chain32.cons_idx;
178         u32 used;
179
180         if (prod < cons)
181                 prod += (u64)U32_MAX + 1;
182
183         used = (u32)(prod - cons);
184         if (p_chain->mode == QED_CHAIN_MODE_NEXT_PTR)
185                 used -= (u32)(prod / elem_per_page - cons / elem_per_page);
186
187         return p_chain->capacity - used;
188 }
189
/* Returns the number of usable elements per chain page */
static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
{
	return p_chain->usable_per_page;
}
194
/* Returns the number of unusable (next-pointer) elements per chain page */
static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{
	return p_chain->elem_unusable;
}
199
/* Returns the number of pages backing the chain */
static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
{
	return p_chain->page_cnt;
}
204
/* Returns the DMA address of the chain's PBL table (PBL mode) */
static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{
	return p_chain->pbl.p_phys_table;
}
209
/**
 * @brief qed_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem	in/out - current element pointer; updated to the
 *			first element of the next page
 * @param idx_to_inc	points at the chain's u16 or u32 prod/cons index,
 *			matching p_chain->cnt_type
 * @param page_to_inc	points at the u16 or u32 PBL page index (only
 *			dereferenced in PBL mode)
 */
static inline void
qed_chain_advance_page(struct qed_chain *p_chain,
		       void **p_next_elem, void *idx_to_inc, void *page_to_inc)

{
	struct qed_chain_next *p_next = NULL;
	u32 page_index = 0;
	switch (p_chain->mode) {
	case QED_CHAIN_MODE_NEXT_PTR:
		/* Follow the next-pointer element at the page's tail */
		p_next = *p_next_elem;
		*p_next_elem = p_next->next_virt;
		/* Skip the unusable tail elements so the cyclic index stays
		 * aligned with real elements.
		 */
		if (is_chain_u16(p_chain))
			*(u16 *)idx_to_inc += p_chain->elem_unusable;
		else
			*(u32 *)idx_to_inc += p_chain->elem_unusable;
		break;
	case QED_CHAIN_MODE_SINGLE:
		/* A single-page chain simply wraps back to its start */
		*p_next_elem = p_chain->p_virt_addr;
		break;

	case QED_CHAIN_MODE_PBL:
		/* Cyclically advance the page index, then look the new
		 * page's virtual address up in the side table.
		 */
		if (is_chain_u16(p_chain)) {
			if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
				*(u16 *)page_to_inc = 0;
			page_index = *(u16 *)page_to_inc;
		} else {
			if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
				*(u32 *)page_to_inc = 0;
			page_index = *(u32 *)page_to_inc;
		}
		*p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
	}
}
253
/* True when the u16 index field 'idx' (prod_idx/cons_idx) sits exactly on
 * its page's unusable tail.
 */
#define is_unusable_idx(p, idx) \
	(((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

/* Same check for u32 counter chains */
#define is_unusable_idx_u32(p, idx) \
	(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)
/* True when incrementing the u16 index would land on the unusable tail */
#define is_unusable_next_idx(p, idx)                             \
	((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* Same check for u32 counter chains */
#define is_unusable_next_idx_u32(p, idx)                         \
	((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) == \
	 (p)->usable_per_page)

/* If the index field 'idx' has reached its page's unusable tail, bump it
 * past the next-pointer element(s). 'idx' is a field name, so it cannot be
 * parenthesized.
 */
#define test_and_skip(p, idx)                                              \
	do {                                            \
		if (is_chain_u16(p)) {                                     \
			if (is_unusable_idx(p, idx))                       \
				(p)->u.chain16.idx += (p)->elem_unusable;  \
		} else {                                                   \
			if (is_unusable_idx_u32(p, idx))                   \
				(p)->u.chain32.idx += (p)->elem_unusable;  \
		}                                       \
	} while (0)
277
/**
 * @brief qed_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate previous produced elements are now consumed.
 *
 * @param p_chain
 */
static inline void qed_chain_return_produced(struct qed_chain *p_chain)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.cons_idx++;
	else
		p_chain->u.chain32.cons_idx++;
	/* Hop over the page's unusable tail if the increment landed on it */
	test_and_skip(p_chain, cons_idx);
}
294
295 /**
296  * @brief qed_chain_produce -
297  *
298  * A chain in which the driver "Produces" elements should use this to get
299  * a pointer to the next element which can be "Produced". It's driver
300  * responsibility to validate that the chain has room for new element.
301  *
302  * @param p_chain
303  *
304  * @return void*, a pointer to next element
305  */
306 static inline void *qed_chain_produce(struct qed_chain *p_chain)
307 {
308         void *p_ret = NULL, *p_prod_idx, *p_prod_page_idx;
309
310         if (is_chain_u16(p_chain)) {
311                 if ((p_chain->u.chain16.prod_idx &
312                      p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
313                         p_prod_idx = &p_chain->u.chain16.prod_idx;
314                         p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx;
315                         qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
316                                                p_prod_idx, p_prod_page_idx);
317                 }
318                 p_chain->u.chain16.prod_idx++;
319         } else {
320                 if ((p_chain->u.chain32.prod_idx &
321                      p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
322                         p_prod_idx = &p_chain->u.chain32.prod_idx;
323                         p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx;
324                         qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
325                                                p_prod_idx, p_prod_page_idx);
326                 }
327                 p_chain->u.chain32.prod_idx++;
328         }
329
330         p_ret = p_chain->p_prod_elem;
331         p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
332                                         p_chain->elem_size);
333
334         return p_ret;
335 }
336
/**
 * @brief qed_chain_get_capacity -
 *
 * Get the maximum number of BDs in chain
 *
 * @param p_chain
 *
 * @return number of usable BDs [the chain's capacity]
 */
static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain)
{
	return p_chain->capacity;
}
351
/**
 * @brief qed_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments producers so they could be written to FW.
 *
 * @param p_chain
 */
static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain)
{
	/* Skip the page's unusable tail first, then advance the producer */
	test_and_skip(p_chain, prod_idx);
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx++;
	else
		p_chain->u.chain32.prod_idx++;
}
368
369 /**
370  * @brief qed_chain_consume -
371  *
372  * A Chain in which the driver utilizes data written by a different source
373  * (i.e., FW) should use this to access passed buffers.
374  *
375  * @param p_chain
376  *
377  * @return void*, a pointer to the next buffer written
378  */
379 static inline void *qed_chain_consume(struct qed_chain *p_chain)
380 {
381         void *p_ret = NULL, *p_cons_idx, *p_cons_page_idx;
382
383         if (is_chain_u16(p_chain)) {
384                 if ((p_chain->u.chain16.cons_idx &
385                      p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
386                         p_cons_idx = &p_chain->u.chain16.cons_idx;
387                         p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx;
388                         qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
389                                                p_cons_idx, p_cons_page_idx);
390                 }
391                 p_chain->u.chain16.cons_idx++;
392         } else {
393                 if ((p_chain->u.chain32.cons_idx &
394                      p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
395                         p_cons_idx = &p_chain->u.chain32.cons_idx;
396                         p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx;
397                 qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
398                                                p_cons_idx, p_cons_page_idx);
399                 }
400                 p_chain->u.chain32.cons_idx++;
401         }
402
403         p_ret = p_chain->p_cons_elem;
404         p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
405                                         p_chain->elem_size);
406
407         return p_ret;
408 }
409
410 /**
411  * @brief qed_chain_reset - Resets the chain to its start state
412  *
413  * @param p_chain pointer to a previously allocted chain
414  */
415 static inline void qed_chain_reset(struct qed_chain *p_chain)
416 {
417         u32 i;
418
419         if (is_chain_u16(p_chain)) {
420                 p_chain->u.chain16.prod_idx = 0;
421                 p_chain->u.chain16.cons_idx = 0;
422         } else {
423                 p_chain->u.chain32.prod_idx = 0;
424                 p_chain->u.chain32.cons_idx = 0;
425         }
426         p_chain->p_cons_elem = p_chain->p_virt_addr;
427         p_chain->p_prod_elem = p_chain->p_virt_addr;
428
429         if (p_chain->mode == QED_CHAIN_MODE_PBL) {
430                 /* Use (page_cnt - 1) as a reset value for the prod/cons page's
431                  * indices, to avoid unnecessary page advancing on the first
432                  * call to qed_chain_produce/consume. Instead, the indices
433                  * will be advanced to page_cnt and then will be wrapped to 0.
434                  */
435                 u32 reset_val = p_chain->page_cnt - 1;
436
437                 if (is_chain_u16(p_chain)) {
438                         p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val;
439                         p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val;
440                 } else {
441                         p_chain->pbl.u.pbl32.prod_page_idx = reset_val;
442                         p_chain->pbl.u.pbl32.cons_page_idx = reset_val;
443                 }
444         }
445
446         switch (p_chain->intended_use) {
447         case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
448         case QED_CHAIN_USE_TO_PRODUCE:
449                 /* Do nothing */
450                 break;
451
452         case QED_CHAIN_USE_TO_CONSUME:
453                 /* produce empty elements */
454                 for (i = 0; i < p_chain->capacity; i++)
455                         qed_chain_recycle_consumed(p_chain);
456                 break;
457         }
458 }
459
460 /**
461  * @brief qed_chain_init - Initalizes a basic chain struct
462  *
463  * @param p_chain
464  * @param p_virt_addr
465  * @param p_phys_addr   physical address of allocated buffer's beginning
466  * @param page_cnt      number of pages in the allocated buffer
467  * @param elem_size     size of each element in the chain
468  * @param intended_use
469  * @param mode
470  */
471 static inline void qed_chain_init_params(struct qed_chain *p_chain,
472                                          u32 page_cnt,
473                                          u8 elem_size,
474                                          enum qed_chain_use_mode intended_use,
475                                          enum qed_chain_mode mode,
476                                          enum qed_chain_cnt_type cnt_type)
477 {
478         /* chain fixed parameters */
479         p_chain->p_virt_addr = NULL;
480         p_chain->p_phys_addr = 0;
481         p_chain->elem_size      = elem_size;
482         p_chain->intended_use = intended_use;
483         p_chain->mode           = mode;
484         p_chain->cnt_type = cnt_type;
485
486         p_chain->elem_per_page          = ELEMS_PER_PAGE(elem_size);
487         p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
488         p_chain->elem_per_page_mask     = p_chain->elem_per_page - 1;
489         p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
490         p_chain->next_page_mask = (p_chain->usable_per_page &
491                                    p_chain->elem_per_page_mask);
492
493         p_chain->page_cnt = page_cnt;
494         p_chain->capacity = p_chain->usable_per_page * page_cnt;
495         p_chain->size = p_chain->elem_per_page * page_cnt;
496
497         p_chain->pbl.p_phys_table = 0;
498         p_chain->pbl.p_virt_table = NULL;
499         p_chain->pbl.pp_virt_addr_tbl = NULL;
500 }
501
/**
 * @brief qed_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr   virtual address of allocated buffer's beginning
 * @param p_phys_addr   physical address of allocated buffer's beginning
 *
 */
static inline void qed_chain_init_mem(struct qed_chain *p_chain,
				      void *p_virt_addr, dma_addr_t p_phys_addr)
{
	p_chain->p_virt_addr = p_virt_addr;
	p_chain->p_phys_addr = p_phys_addr;
}
518
/**
 * @brief qed_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl    pointer to a pre allocated side table which will hold
 *                      virtual page addresses.
 * @param p_phys_pbl    pointer to a pre-allocated side table which will hold
 *                      physical page addresses.
 * @param pp_virt_addr_tbl
 *                      pointer to a pre-allocated side table which will hold
 *                      the virtual addresses of the chain pages.
 *
 */
static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
					  void *p_virt_pbl,
					  dma_addr_t p_phys_pbl,
					  void **pp_virt_addr_tbl)
{
	p_chain->pbl.p_phys_table = p_phys_pbl;
	p_chain->pbl.p_virt_table = p_virt_pbl;
	p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}
543
/**
 * @brief qed_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr   virtual address of a chain page of which the next
 *                      pointer element is initialized
 * @param p_virt_next   virtual address of the next chain page
 * @param p_phys_next   physical address of the next chain page
 *
 */
static inline void
qed_chain_init_next_ptr_elem(struct qed_chain *p_chain,
			     void *p_virt_curr,
			     void *p_virt_next, dma_addr_t p_phys_next)
{
	struct qed_chain_next *p_next;
	u32 size;

	/* The next-pointer element lives right after the page's usable
	 * elements.
	 */
	size = p_chain->elem_size * p_chain->usable_per_page;
	p_next = (struct qed_chain_next *)((u8 *)p_virt_curr + size);

	DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

	p_next->next_virt = p_virt_next;
}
571
572 /**
573  * @brief qed_chain_get_last_elem -
574  *
575  * Returns a pointer to the last element of the chain
576  *
577  * @param p_chain
578  *
579  * @return void*
580  */
581 static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
582 {
583         struct qed_chain_next *p_next = NULL;
584         void *p_virt_addr = NULL;
585         u32 size, last_page_idx;
586
587         if (!p_chain->p_virt_addr)
588                 goto out;
589
590         switch (p_chain->mode) {
591         case QED_CHAIN_MODE_NEXT_PTR:
592                 size = p_chain->elem_size * p_chain->usable_per_page;
593                 p_virt_addr = p_chain->p_virt_addr;
594                 p_next = (struct qed_chain_next *)((u8 *)p_virt_addr + size);
595                 while (p_next->next_virt != p_chain->p_virt_addr) {
596                         p_virt_addr = p_next->next_virt;
597                         p_next = (struct qed_chain_next *)((u8 *)p_virt_addr +
598                                                            size);
599                 }
600                 break;
601         case QED_CHAIN_MODE_SINGLE:
602                 p_virt_addr = p_chain->p_virt_addr;
603                 break;
604         case QED_CHAIN_MODE_PBL:
605                 last_page_idx = p_chain->page_cnt - 1;
606                 p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
607                 break;
608         }
609         /* p_virt_addr points at this stage to the last page of the chain */
610         size = p_chain->elem_size * (p_chain->usable_per_page - 1);
611         p_virt_addr = (u8 *)p_virt_addr + size;
612 out:
613         return p_virt_addr;
614 }
615
/**
 * @brief qed_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx	new producer index; truncated to u16 for 16-bit
 *			counter chains
 * @param p_prod_elem	element pointer matching prod_idx
 */
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
				      u32 prod_idx, void *p_prod_elem)
{
	if (is_chain_u16(p_chain))
		p_chain->u.chain16.prod_idx = (u16) prod_idx;
	else
		p_chain->u.chain32.prod_idx = prod_idx;
	p_chain->p_prod_elem = p_prod_elem;
}
631
632 /**
633  * @brief qed_chain_pbl_zero_mem - set chain memory to 0
634  *
635  * @param p_chain
636  */
637 static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain)
638 {
639         u32 i, page_cnt;
640
641         if (p_chain->mode != QED_CHAIN_MODE_PBL)
642                 return;
643
644         page_cnt = qed_chain_get_page_cnt(p_chain);
645
646         for (i = 0; i < page_cnt; i++)
647                 memset(p_chain->pbl.pp_virt_addr_tbl[i], 0,
648                        QED_CHAIN_PAGE_SIZE);
649 }
650
651 #endif