/* GNU Linux-libre 4.19.264-gnu1: drivers/net/ethernet/mellanox/mlx4/icm.c */
/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

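/*
 * ICM (InfiniHost Context Memory) is host memory that the HCA firmware
 * uses to store its control objects (QP/CQ/EQ context, memory translation
 * tables, and so on).  The driver allocates it in chunks of host pages
 * and maps those chunks into the device's ICM virtual address space with
 * the MAP_ICM firmware command.
 */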
/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk. Note that the chunks are not necessarily in contiguous
 * physical memory.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18,
};

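/*
 * Tear down a streaming-DMA chunk: undo the pci_map_sg() done at
 * allocation time (if the chunk was ever mapped) and give the pages
 * back to the page allocator.
 */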
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->sg[i]),
			     get_order(chunk->sg[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev,
				  chunk->buf[i].size,
				  chunk->buf[i].addr,
				  chunk->buf[i].dma_addr);
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

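/*
 * Allocate one high-order page run for a scatterlist entry.  The
 * allocation is tried on the requested NUMA node first and falls back
 * to any node before giving up.
 */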
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

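/*
 * Allocate one coherent-DMA buffer.  ICM mappings are in units of whole
 * pages, so a buffer that is not page-aligned is released and treated
 * as an allocation failure.
 */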
static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
				   int order, gfp_t gfp_mask)
{
	buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &buf->dma_addr, gfp_mask);
	if (!buf->addr)
		return -ENOMEM;

	if (offset_in_page(buf->addr)) {
		dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
				  buf->dma_addr);
		return -ENOMEM;
	}

	buf->size = PAGE_SIZE << order;
	return 0;
}

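/*
 * Allocate ICM backed by @npages pages.  Pages are allocated in runs of
 * up to MLX4_ICM_ALLOC_SIZE bytes and collected into chunks of up to
 * MLX4_ICM_CHUNK_LEN runs each; when a high-order allocation fails, the
 * order is lowered and the remainder retried, so the backing memory may
 * be fragmented.  Non-coherent chunks are DMA-mapped with pci_map_sg()
 * once complete.  Returns NULL on failure.
 */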
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	gfp_t mask;
	int ret;

	/* Coherent allocations need a kernel virtual address, so highmem
	 * cannot be used for them.
	 */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc_node(sizeof(*icm),
			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof(*icm),
			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kzalloc_node(sizeof(*chunk),
					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kzalloc(sizeof(*chunk),
						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}
			chunk->coherent = coherent;

			if (!coherent)
				sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		/* Don't allocate a run larger than what is still needed. */
		while (1 << cur_order > npages)
			--cur_order;

		/* Higher-order allocations have a smaller-order fallback
		 * below, so don't let them stall in direct reclaim.
		 */
		mask = gfp_mask;
		if (cur_order)
			mask &= ~__GFP_DIRECT_RECLAIM;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						&chunk->buf[chunk->npages],
						cur_order, mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
						   cur_order, mask,
						   dev->numa_node);

		if (ret) {
			/* Retry the remainder with a smaller order. */
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	/* Map the final, partially filled chunk. */
	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

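/*
 * Wrappers for the firmware commands that map and unmap ICM.  As far as
 * this file is concerned, mlx4_map_cmd() (fw.c) walks the ICM chunk list
 * and posts the page-aligned (address, size) runs to the firmware through
 * a command mailbox; UNMAP_ICM takes the ICM virtual address and a count
 * of MLX4_ICM_PAGE_SIZE (4K) pages to tear down.
 */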
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

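/*
 * Pin the ICM chunk backing object @obj.  Table chunks are populated
 * lazily: the chunk is allocated and mapped to the device on first use
 * and only refcounted afterwards.  Callers pair this with
 * mlx4_table_put().
 */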
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i = (obj & (table->num_obj - 1)) /
			(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

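/*
 * Drop a reference taken by mlx4_table_get().  When the last reference
 * to a chunk goes away, the chunk is unmapped from the device and freed.
 */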
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i;
	u64 offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

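/*
 * Look up the kernel virtual address of object @obj, and optionally its
 * DMA address, by walking the chunk backing it until the scatterlist
 * entry (or coherent buffer) containing the object's byte offset is
 * found.  Only valid for lowmem tables, since highmem pages have no
 * permanent kernel mapping.
 */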
void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
			dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	u64 idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	void *addr = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			dma_addr_t dma_addr;
			size_t len;

			if (table->coherent) {
				len = chunk->buf[i].size;
				dma_addr = chunk->buf[i].dma_addr;
				addr = chunk->buf[i].addr;
			} else {
				struct page *page;

				len = sg_dma_len(&chunk->sg[i]);
				dma_addr = sg_dma_address(&chunk->sg[i]);

				/* XXX: we should never do this for highmem
				 * allocation.  This function either needs
				 * to be split, or the kernel virtual address
				 * return needs to be made optional.
				 */
				page = sg_page(&chunk->sg[i]);
				addr = lowmem_page_address(page);
			}

			if (dma_handle && dma_offset >= 0) {
				if (len > dma_offset)
					*dma_handle = dma_addr + dma_offset;
				dma_offset -= len;
			}

			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we find the page here, *dma_handle has
			 * already been assigned above.
			 */
			if (len > offset)
				goto out;
			offset -= len;
		}
	}

	addr = NULL;
out:
	mutex_unlock(&table->mutex);
	return addr ? addr + offset : NULL;
}

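/*
 * Pin/unpin all table chunks covering objects @start..@end (inclusive),
 * stepping one chunk's worth of objects at a time.  On failure,
 * mlx4_table_get_range() unwinds the chunks it already pinned.
 *
 * Sketch of typical use around a range of objects (hypothetical table
 * and range, assuming the usual get/put pairing in this driver):
 *
 *	err = mlx4_table_get_range(dev, table, first, last);
 *	if (err)
 *		return err;
 *	... use objects first..last ...
 *	mlx4_table_put_range(dev, table, first, last);
 */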
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	u32 i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end)
{
	u32 i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

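/*
 * Set up an ICM table of @nobj objects of @obj_size bytes, starting at
 * ICM virtual address @virt.  Only the chunks covering the first
 * @reserved objects (those owned by the firmware) are mapped up front;
 * everything else is populated on demand by mlx4_table_get().
 */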
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u32 nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	if (WARN_ON(!obj_per_chunk))
		return -EINVAL;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	size = (u64) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
					i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	kvfree(table->icm);

	return -ENOMEM;
}

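/*
 * Unmap and free every chunk of the table, including the pinned
 * reserved chunks, then release the chunk pointer array.
 */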
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kvfree(table->icm);
}