/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk. Note that the chunks are not necessarily in contiguous
 * physical memory.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18,
};
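
/*
 * Illustrative note (a sketch added here, not part of the original driver):
 * with the common 4 KB PAGE_SIZE these constants work out to
 *
 *	get_order(MLX4_ICM_ALLOC_SIZE)		== 6	(order-6 allocations)
 *	MLX4_ICM_ALLOC_SIZE >> PAGE_SHIFT	== 64	(pages per full chunk)
 *
 * so mlx4_alloc_icm() below tries order-6 allocations first and falls back
 * to smaller orders when memory is fragmented. The exact numbers depend on
 * the architecture's page size.
 */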

static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->sg[i]),
			     get_order(chunk->sg[i].length));
}

static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->persist->pdev->dev, chunk->buf[i].size,
				  chunk->buf[i].addr, chunk->buf[i].dma_addr);
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
				gfp_t gfp_mask, int node)
{
	struct page *page;

	page = alloc_pages_node(node, gfp_mask, order);
	if (!page) {
		page = alloc_pages(gfp_mask, order);
		if (!page)
			return -ENOMEM;
	}

	sg_set_page(mem, page, PAGE_SIZE << order, 0);

	return 0;
}

static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
				   int order, gfp_t gfp_mask)
{
	buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &buf->dma_addr, gfp_mask);
	if (!buf->addr)
		return -ENOMEM;

	if (offset_in_page(buf->addr)) {
		/* The buffer handed to the firmware must be page-aligned. */
		dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
				  buf->dma_addr);
		return -EINVAL;
	}

	buf->size = PAGE_SIZE << order;

	return 0;
}

struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	gfp_t mask;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc_node(sizeof(*icm),
			   gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN),
			   dev->numa_node);
	if (!icm) {
		icm = kmalloc(sizeof(*icm),
			      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
		if (!icm)
			return NULL;
	}

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kzalloc_node(sizeof(*chunk),
					     gfp_mask & ~(__GFP_HIGHMEM |
							  __GFP_NOWARN),
					     dev->numa_node);
			if (!chunk) {
				chunk = kzalloc(sizeof(*chunk),
						gfp_mask & ~(__GFP_HIGHMEM |
							     __GFP_NOWARN));
				if (!chunk)
					goto fail;
			}
			chunk->coherent = coherent;

			if (!coherent)
				sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		mask = gfp_mask;
		if (cur_order)
			mask &= ~__GFP_DIRECT_RECLAIM;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
						      &chunk->buf[chunk->npages],
						      cur_order, mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
						   cur_order, mask,
						   dev->numa_node);

		if (ret) {
			/* Fall back to a smaller allocation order. */
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}
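
/*
 * Illustrative usage sketch (an assumption-laden example, not code from the
 * original driver): callers such as mlx4_table_get() below allocate one
 * table chunk's worth of pages and later release it with the same coherency
 * flag, e.g.:
 *
 *	struct mlx4_icm *icm;
 *
 *	icm = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
 *			     GFP_KERNEL | __GFP_NOWARN, 0);
 *	if (!icm)
 *		return -ENOMEM;
 *	...
 *	mlx4_free_icm(dev, icm, 0);
 *
 * Note that coherent allocations must not use highmem GFP flags, as the
 * BUG_ON() at the top of mlx4_alloc_icm() enforces.
 */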

static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}

int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i = (obj & (table->num_obj - 1)) /
		(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}
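
/*
 * Illustrative note (a sketch, not part of the original driver):
 * mlx4_table_get() and mlx4_table_put() form a refcounted pair; the chunk
 * backing an object is mapped on first get and unmapped when the last
 * reference is dropped:
 *
 *	err = mlx4_table_get(dev, table, obj);
 *	if (err)
 *		return err;
 *	...the object's ICM backing is guaranteed to exist here...
 *	mlx4_table_put(dev, table, obj);
 *
 * ("table" and "obj" above are placeholders for a real ICM table and object
 * index.)
 */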

void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
	u32 i;
	u64 offset;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		offset = (u64) i * MLX4_TABLE_CHUNK_SIZE;
		mlx4_UNMAP_ICM(dev, table->virt + offset,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
		      dma_addr_t *dma_handle)
{
	int offset, dma_offset, i;
	u64 idx;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	void *addr = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			dma_addr_t dma_addr;
			size_t len;

			if (table->coherent) {
				len = chunk->buf[i].size;
				dma_addr = chunk->buf[i].dma_addr;
				addr = chunk->buf[i].addr;
			} else {
				struct page *page;

				len = sg_dma_len(&chunk->sg[i]);
				dma_addr = sg_dma_address(&chunk->sg[i]);

				/* XXX: we should never do this for highmem
				 * allocation. This function either needs
				 * to be split, or the kernel virtual address
				 * return needs to be made optional.
				 */
				page = sg_page(&chunk->sg[i]);
				addr = lowmem_page_address(page);
			}

			if (dma_handle && dma_offset >= 0) {
				if (len > dma_offset)
					*dma_handle = dma_addr + dma_offset;
				dma_offset -= len;
			}

			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (len > offset)
				goto out;
			offset -= len;
		}
	}

	addr = NULL;
out:
	mutex_unlock(&table->mutex);
	return addr ? addr + offset : NULL;
}
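
/*
 * Illustrative usage sketch (an assumption-laden example, not code from the
 * original driver): a caller that needs both the kernel virtual address and
 * the bus address of an object's ICM backing might do:
 *
 *	dma_addr_t dma;
 *	void *vaddr = mlx4_table_find(table, obj, &dma);
 *
 *	if (!vaddr)
 *		return -ENOMEM;		// object not currently backed by ICM
 *
 * As the lowmem check at the top of the function shows, this only works for
 * tables allocated from low memory.
 */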

int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 u32 start, u32 end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int err;
	u32 i;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  u32 start, u32 end)
{
	u32 i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, u32 nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned int chunk_size;
	u32 i;
	u64 size;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	if (WARN_ON(!obj_per_chunk))
		return -EINVAL;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm = kvcalloc(num_icm, sizeof(*table->icm), GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	size = (u64) nobj * obj_size;
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
			chunk_size = PAGE_ALIGN(size -
						i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	kvfree(table->icm);

	return -ENOMEM;
}
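
/*
 * Illustrative usage sketch (an assumption-laden example, not code from the
 * original driver): an ICM table is typically created once during device
 * initialization and destroyed at cleanup; the names and sizes below are
 * placeholders:
 *
 *	err = mlx4_init_icm_table(dev, &example_table, icm_virt_base,
 *				  example_obj_size, example_nobj,
 *				  example_reserved, 1, 0);
 *	if (err)
 *		return err;
 *	...
 *	mlx4_cleanup_icm_table(dev, &example_table);
 */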

void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	u32 i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kvfree(table->icm);
}