/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>

#include "mlx4_ib.h"

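/*
 * Translate IB verbs access flags into the mlx4 MPT permission bits
 * consumed by the firmware.  Local read access is always granted.
 */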
static u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX4_PERM_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) |
               (acc & IB_ACCESS_MW_BIND       ? MLX4_PERM_BIND_MW      : 0) |
               MLX4_PERM_LOCAL_READ;
}

static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type)
{
        switch (type) {
        case IB_MW_TYPE_1:      return MLX4_MW_TYPE_1;
        case IB_MW_TYPE_2:      return MLX4_MW_TYPE_2;
        default:                return -1;
        }
}

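/*
 * Allocate an MR that covers all of memory (iova 0, length ~0ull) with the
 * requested access rights, for kernel DMA access through the returned
 * lkey/rkey.
 */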
struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

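/*
 * Walk the umem scatterlist and write the DMA address of every HCA page
 * into the MR's MTT.  Addresses are staged in a single kernel page and
 * flushed to mlx4_write_mtt() in chunks of PAGE_SIZE / sizeof(u64) entries.
 */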
int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
                           struct ib_umem *umem)
{
        u64 *pages;
        int i, k, entry;
        int n;
        int len;
        int err = 0;
        struct scatterlist *sg;

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        i = n = 0;

        for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
                len = sg_dma_len(sg) >> mtt->page_shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) +
                                umem->page_size * k;
                        /*
                         * Be friendly to mlx4_write_mtt() and
                         * pass it chunks of appropriate size.
                         */
                        if (i == PAGE_SIZE / sizeof (u64)) {
                                err = mlx4_write_mtt(dev->dev, mtt, n,
                                                     i, pages);
                                if (err)
                                        goto out;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);

out:
        free_page((unsigned long) pages);
        return err;
}

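/*
 * Pin the user memory for an MR.  As the comment below explains, the pages
 * are registered as writable whenever the underlying VMA allows it, so a
 * later rereg from read-only to writable does not have to pin the memory
 * again.
 */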
static struct ib_umem *mlx4_get_umem_mr(struct ib_ucontext *context, u64 start,
                                        u64 length, u64 virt_addr,
                                        int access_flags)
{
        /*
         * Force registering the memory as writable if the underlying pages
         * are writable.  This is so rereg can change the access permissions
         * from readable to writable without having to run through ib_umem_get
         * again.
         */
        if (!ib_access_writable(access_flags)) {
                struct vm_area_struct *vma;

                down_read(&current->mm->mmap_sem);
                /*
                 * FIXME: Ideally this would iterate over all the vmas that
                 * cover the memory, but for now it requires a single vma to
                 * entirely cover the MR to support RO mappings.
                 */
                vma = find_vma(current->mm, start);
                if (vma && vma->vm_end >= start + length &&
                    vma->vm_start <= start) {
                        if (vma->vm_flags & VM_WRITE)
                                access_flags |= IB_ACCESS_LOCAL_WRITE;
                } else {
                        access_flags |= IB_ACCESS_LOCAL_WRITE;
                }

                up_read(&current->mm->mmap_sem);
        }

        return ib_umem_get(context, start, length, access_flags, 0);
}

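/*
 * Register a user MR: pin the user pages, allocate the MPT and MTT, write
 * the page addresses into the MTT, and enable the region.  Errors unwind in
 * reverse order of setup.
 */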
struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = mlx4_get_umem_mr(pd->uobject->context, start, length,
                                    virt_addr, access_flags);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        n = ib_umem_page_count(mr->umem);
        shift = ilog2(mr->umem->page_size);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);

        return ERR_PTR(err);
}

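/*
 * Re-register an existing user MR.  Depending on the flags this can move
 * the MR to a new PD, change its access rights, and/or replace the backing
 * memory (IB_MR_REREG_TRANS).  All changes are applied to the MPT entry
 * while software owns it, between mlx4_mr_hw_get_mpt() and
 * mlx4_mr_hw_put_mpt().
 */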
int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
                          u64 start, u64 length, u64 virt_addr,
                          int mr_access_flags, struct ib_pd *pd,
                          struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(mr->device);
        struct mlx4_ib_mr *mmr = to_mmr(mr);
        struct mlx4_mpt_entry *mpt_entry;
        struct mlx4_mpt_entry **pmpt_entry = &mpt_entry;
        int err;

        /* Since we synchronize this call and mlx4_ib_dereg_mr via uverbs,
         * we assume that the calls can't run concurrently. Otherwise, a
         * race exists.
         */
        err = mlx4_mr_hw_get_mpt(dev->dev, &mmr->mmr, &pmpt_entry);

        if (err)
                return err;

        if (flags & IB_MR_REREG_PD) {
                err = mlx4_mr_hw_change_pd(dev->dev, *pmpt_entry,
                                           to_mpd(pd)->pdn);

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_ACCESS) {
                if (ib_access_writable(mr_access_flags) &&
                    !mmr->umem->writable) {
                        err = -EPERM;
                        goto release_mpt_entry;
                }

                err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
                                               convert_access(mr_access_flags));

                if (err)
                        goto release_mpt_entry;
        }

        if (flags & IB_MR_REREG_TRANS) {
                int shift;
                int n;

                mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                ib_umem_release(mmr->umem);
                mmr->umem =
                        mlx4_get_umem_mr(mr->uobject->context, start, length,
                                         virt_addr, mr_access_flags);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Prevent mlx4_ib_dereg_mr from free'ing invalid pointer */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
                n = ib_umem_page_count(mmr->umem);
                shift = ilog2(mmr->umem->page_size);

                err = mlx4_mr_rereg_mem_write(dev->dev, &mmr->mmr,
                                              virt_addr, length, n, shift,
                                              *pmpt_entry);
                if (err) {
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
                mmr->mmr.iova = virt_addr;
                mmr->mmr.size = length;

                err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem);
                if (err) {
                        mlx4_mr_rereg_mem_cleanup(dev->dev, &mmr->mmr);
                        ib_umem_release(mmr->umem);
                        goto release_mpt_entry;
                }
        }

        /* If we couldn't transfer the MR to the HCA, just remember to
         * return a failure. But dereg_mr will free the resources.
         */
        err = mlx4_mr_hw_write_mpt(dev->dev, &mmr->mmr, pmpt_entry);
        if (!err && flags & IB_MR_REREG_ACCESS)
                mmr->mmr.access = mr_access_flags;

release_mpt_entry:
        mlx4_mr_hw_put_mpt(dev->dev, pmpt_entry);

        return err;
}

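/*
 * Allocate the driver-private page list used for fast registration.  The
 * buffer is over-allocated so the array handed to the HCA can be aligned to
 * MLX4_MR_PAGES_ALIGN; it is DMA mapped once here and only synced again in
 * mlx4_ib_map_mr_sg().
 */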
static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int size = max_pages * sizeof(u64);
        int add_size;
        int ret;

        add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

        mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
        if (!mr->pages_alloc)
                return -ENOMEM;

        mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);

        mr->page_map = dma_map_single(device->dma_device, mr->pages,
                                      size, DMA_TO_DEVICE);

        if (dma_mapping_error(device->dma_device, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        kfree(mr->pages_alloc);

        return ret;
}

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;
                int size = mr->max_pages * sizeof(u64);

                dma_unmap_single(device->dma_device, mr->page_map,
                                 size, DMA_TO_DEVICE);
                kfree(mr->pages_alloc);
                mr->pages = NULL;
        }
}

int mlx4_ib_dereg_mr(struct ib_mr *ibmr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}

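/*
 * Allocate a memory window: reserve an mlx4 MW of the requested type and
 * enable it so its rkey can be used in subsequent bind operations.
 */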
struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mw *mw;
        int err;

        mw = kmalloc(sizeof(*mw), GFP_KERNEL);
        if (!mw)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn,
                            to_mlx4_type(type), &mw->mmw);
        if (err)
                goto err_free;

        err = mlx4_mw_enable(dev->dev, &mw->mmw);
        if (err)
                goto err_mw;

        mw->ibmw.rkey = mw->mmw.key;

        return &mw->ibmw;

err_mw:
        mlx4_mw_free(dev->dev, &mw->mmw);

err_free:
        kfree(mw);

        return ERR_PTR(err);
}

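/*
 * Bind a memory window by posting an IB_WR_BIND_MW work request on the QP.
 * The rkey is advanced with ib_inc_rkey() and committed back to the ib_mw
 * only if the post succeeds.
 */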
int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw,
                    struct ib_mw_bind *mw_bind)
{
        struct ib_bind_mw_wr wr;
        struct ib_send_wr *bad_wr;
        int ret;

        memset(&wr, 0, sizeof(wr));

        wr.wr.opcode     = IB_WR_BIND_MW;
        wr.wr.wr_id      = mw_bind->wr_id;
        wr.wr.send_flags = mw_bind->send_flags;
        wr.mw            = mw;
        wr.bind_info     = mw_bind->bind_info;
        wr.rkey          = ib_inc_rkey(mw->rkey);

        ret = mlx4_ib_post_send(qp, &wr.wr, &bad_wr);
        if (!ret)
                mw->rkey = wr.rkey;

        return ret;
}

int mlx4_ib_dealloc_mw(struct ib_mw *ibmw)
{
        struct mlx4_ib_mw *mw = to_mmw(ibmw);

        mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw);
        kfree(mw);

        return 0;
}

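/*
 * Allocate an MR for the fast registration path.  Only IB_MR_TYPE_MEM_REG
 * is supported, with at most MLX4_MAX_FAST_REG_PAGES pages; the page list
 * itself comes from mlx4_alloc_priv_pages().
 */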
struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd,
                               enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

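/*
 * FMR support: allocate an FMR backed by an mlx4 MPT/MTT sized for
 * fmr_attr->max_pages pages of 1 << fmr_attr->page_shift bytes, to be mapped
 * and unmapped later via mlx4_ib_map_phys_fmr() and mlx4_ib_unmap_fmr().
 */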
struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,
                                 struct ib_fmr_attr *fmr_attr)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_fmr *fmr;
        int err = -ENOMEM;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_fmr_alloc(dev->dev, to_mpd(pd)->pdn, convert_access(acc),
                             fmr_attr->max_pages, fmr_attr->max_maps,
                             fmr_attr->page_shift, &fmr->mfmr);
        if (err)
                goto err_free;

        err = mlx4_fmr_enable(to_mdev(pd->device)->dev, &fmr->mfmr);
        if (err)
                goto err_mr;

        fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mfmr.mr.key;

        return &fmr->ibfmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);

err_free:
        kfree(fmr);

        return ERR_PTR(err);
}

int mlx4_ib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                         int npages, u64 iova)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ifmr->ibfmr.device);

        return mlx4_map_phys_fmr(dev->dev, &ifmr->mfmr, page_list, npages, iova,
                                 &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
}

int mlx4_ib_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *ibfmr;
        int err;
        struct mlx4_dev *mdev = NULL;

        list_for_each_entry(ibfmr, fmr_list, list) {
                if (mdev && to_mdev(ibfmr->device)->dev != mdev)
                        return -EINVAL;
                mdev = to_mdev(ibfmr->device)->dev;
        }

        if (!mdev)
                return 0;

        list_for_each_entry(ibfmr, fmr_list, list) {
                struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);

                mlx4_fmr_unmap(mdev, &ifmr->mfmr, &ifmr->ibfmr.lkey, &ifmr->ibfmr.rkey);
        }

        /*
         * Make sure all MPT status updates are visible before issuing
         * SYNC_TPT firmware command.
         */
        wmb();

        err = mlx4_SYNC_TPT(mdev);
        if (err)
                pr_warn("SYNC_TPT error %d when unmapping FMRs\n", err);

        return 0;
}

int mlx4_ib_fmr_dealloc(struct ib_fmr *ibfmr)
{
        struct mlx4_ib_fmr *ifmr = to_mfmr(ibfmr);
        struct mlx4_ib_dev *dev = to_mdev(ibfmr->device);
        int err;

        err = mlx4_fmr_free(dev->dev, &ifmr->mfmr);
        kfree(ifmr);

        return err;
}

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}

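/*
 * Map a scatterlist into the MR's private page list for a fast registration
 * work request.  The page list is synced for CPU access, filled through
 * ib_sg_to_pages()/mlx4_set_page(), then synced back for device access
 * before it is handed to the HCA.
 */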
int mlx4_ib_map_mr_sg(struct ib_mr *ibmr,
                      struct scatterlist *sg,
                      int sg_nents)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   sizeof(u64) * mr->max_pages,
                                   DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      sizeof(u64) * mr->max_pages,
                                      DMA_TO_DEVICE);

        return rc;
}