/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
#include "hns_roce_hem.h"

static u32 hw_index_to_key(unsigned long ind)
{
	return (u32)(ind >> 24) | (ind << 8);
}

unsigned long key_to_hw_index(u32 key)
{
	return (key << 24) | (key >> 8);
}
EXPORT_SYMBOL_GPL(key_to_hw_index);

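/*
 * A note on the two helpers above: hw_index_to_key() is effectively a
 * 32-bit rotate-left by 8 of the MTPT bitmap index, and key_to_hw_index()
 * is the inverse rotate-right by 8, so the mapping is lossless. For
 * example, index 0x000001 becomes key 0x00000100, and feeding 0x00000100
 * back through key_to_hw_index() recovers 0x000001.
 */
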
static int hns_roce_sw2hw_mpt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_cmd_mailbox *mailbox,
			      unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, mpt_index, 0,
				 HNS_ROCE_CMD_SW2HW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}

int hns_roce_hw2sw_mpt(struct hns_roce_dev *hr_dev,
		       struct hns_roce_cmd_mailbox *mailbox,
		       unsigned long mpt_index)
{
	return hns_roce_cmd_mbox(hr_dev, 0, mailbox ? mailbox->dma : 0,
				 mpt_index, !mailbox, HNS_ROCE_CMD_HW2SW_MPT,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}
EXPORT_SYMBOL_GPL(hns_roce_hw2sw_mpt);

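/*
 * In hns_roce_hw2sw_mpt() the mailbox argument may be NULL: the output
 * parameter is then 0 and the op_modifier bit (!mailbox) appears to tell
 * the hardware not to write the retired MPT entry back. Callers such as
 * hns_roce_mr_free() below rely on this when they only need the entry
 * invalidated, not read back.
 */
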
static int hns_roce_buddy_alloc(struct hns_roce_buddy *buddy, int order,
				unsigned long *seg)
{
	int o;
	u32 m;

	spin_lock(&buddy->lock);

	for (o = order; o <= buddy->max_order; ++o) {
		if (buddy->num_free[o]) {
			m = 1 << (buddy->max_order - o);
			*seg = find_first_bit(buddy->bits[o], m);
			if (*seg < m)
				goto found;
		}
	}
	spin_unlock(&buddy->lock);
	return -1;

found:
	clear_bit(*seg, buddy->bits[o]);
	--buddy->num_free[o];

	/* Split larger blocks down to the requested order. */
	while (o > order) {
		--o;
		*seg <<= 1;
		set_bit(*seg ^ 1, buddy->bits[o]);
		++buddy->num_free[o];
	}

	spin_unlock(&buddy->lock);

	*seg <<= order;

	return 0;
}

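/*
 * hns_roce_buddy_alloc() is a classic buddy search: scan upward from the
 * requested order until a free block is found, claim it, then split it
 * back down, releasing the buddy half (*seg ^ 1) at each level. For
 * example, asking for order 0 when only an order-2 block is free leaves
 * one order-1 and one order-0 buddy marked free, and returns the
 * remaining order-0 segment (already scaled by *seg <<= order).
 */
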
static void hns_roce_buddy_free(struct hns_roce_buddy *buddy, unsigned long seg,
				int order)
{
	seg >>= order;

	spin_lock(&buddy->lock);

	while (test_bit(seg ^ 1, buddy->bits[order])) {
		clear_bit(seg ^ 1, buddy->bits[order]);
		--buddy->num_free[order];
		seg >>= 1;
		++order;
	}

	set_bit(seg, buddy->bits[order]);
	++buddy->num_free[order];

	spin_unlock(&buddy->lock);
}

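/*
 * Freeing mirrors allocation: as long as the buddy of the freed segment
 * (seg ^ 1) is also free, the two halves are merged and the search moves
 * up one order, so maximal contiguous blocks are reconstituted eagerly.
 */
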
static int hns_roce_buddy_init(struct hns_roce_buddy *buddy, int max_order)
{
	int i, s;

	buddy->max_order = max_order;
	spin_lock_init(&buddy->lock);
	buddy->bits = kcalloc(buddy->max_order + 1,
			      sizeof(*buddy->bits),
			      GFP_KERNEL);
	buddy->num_free = kcalloc(buddy->max_order + 1,
				  sizeof(*buddy->num_free),
				  GFP_KERNEL);
	if (!buddy->bits || !buddy->num_free)
		goto err_out;

	for (i = 0; i <= buddy->max_order; ++i) {
		s = BITS_TO_LONGS(1 << (buddy->max_order - i));
		buddy->bits[i] = kcalloc(s, sizeof(long), GFP_KERNEL |
					 __GFP_NOWARN);
		if (!buddy->bits[i]) {
			/* Fall back to vmalloc for large bitmaps */
			buddy->bits[i] = vzalloc(array_size(s, sizeof(long)));
			if (!buddy->bits[i])
				goto err_out_free;
		}
	}

	set_bit(0, buddy->bits[buddy->max_order]);
	buddy->num_free[buddy->max_order] = 1;

	return 0;

err_out_free:
	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

err_out:
	kfree(buddy->bits);
	kfree(buddy->num_free);
	return -ENOMEM;
}

static void hns_roce_buddy_cleanup(struct hns_roce_buddy *buddy)
{
	int i;

	for (i = 0; i <= buddy->max_order; ++i)
		kvfree(buddy->bits[i]);

	kfree(buddy->bits);
	kfree(buddy->num_free);
}

static int hns_roce_alloc_mtt_range(struct hns_roce_dev *hr_dev, int order,
				    unsigned long *seg, u32 mtt_type)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	struct hns_roce_hem_table *table;
	struct hns_roce_buddy *buddy;
	int ret;

	if (mtt_type == MTT_TYPE_WQE) {
		buddy = &mr_table->mtt_buddy;
		table = &mr_table->mtt_table;
	} else {
		buddy = &mr_table->mtt_cqe_buddy;
		table = &mr_table->mtt_cqe_table;
	}

	ret = hns_roce_buddy_alloc(buddy, order, seg);
	if (ret == -1)
		return -1;

	if (hns_roce_table_get_range(hr_dev, table, *seg,
				     *seg + (1 << order) - 1)) {
		hns_roce_buddy_free(buddy, *seg, order);
		return -1;
	}

	return 0;
}

int hns_roce_mtt_init(struct hns_roce_dev *hr_dev, int npages, int page_shift,
		      struct hns_roce_mtt *mtt)
{
	int ret;
	int i;

	/* Page num is zero, correspond to DMA memory register */
	if (!npages) {
		mtt->order = -1;
		mtt->page_shift = HNS_ROCE_HEM_PAGE_SHIFT;
		return 0;
	}

	/* Note: if page_shift is zero, FAST memory register */
	mtt->page_shift = page_shift;

	/* Compute the number of MTT segments necessary */
	for (mtt->order = 0, i = HNS_ROCE_MTT_ENTRY_PER_SEG; i < npages;
	     i <<= 1)
		++mtt->order;

	/* Allocate MTT entry */
	ret = hns_roce_alloc_mtt_range(hr_dev, mtt->order, &mtt->first_seg,
				       mtt->mtt_type);
	if (ret == -1)
		return -ENOMEM;

	return 0;
}

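/*
 * The loop in hns_roce_mtt_init() computes the smallest order such that
 * HNS_ROCE_MTT_ENTRY_PER_SEG << order >= npages. E.g. with 8 MTT entries
 * per segment, npages = 100 doubles i four times (8, 16, 32, 64, 128),
 * giving mtt->order = 4, i.e. a buddy block of 16 contiguous segments.
 */
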
void hns_roce_mtt_cleanup(struct hns_roce_dev *hr_dev, struct hns_roce_mtt *mtt)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	if (mtt->order < 0)
		return;

	if (mtt->mtt_type == MTT_TYPE_WQE) {
		hns_roce_buddy_free(&mr_table->mtt_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
	} else {
		hns_roce_buddy_free(&mr_table->mtt_cqe_buddy, mtt->first_seg,
				    mtt->order);
		hns_roce_table_put_range(hr_dev, &mr_table->mtt_cqe_table,
					 mtt->first_seg,
					 mtt->first_seg + (1 << mtt->order) - 1);
	}
}
EXPORT_SYMBOL_GPL(hns_roce_mtt_cleanup);

static void hns_roce_loop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr, int err_loop_index,
			       int loop_i, int loop_j)
{
	struct device *dev = hr_dev->dev;
	u32 mhop_num;
	u32 pbl_bt_sz;
	u32 bt_idx;
	int i, j;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	i = loop_i;
	if (mhop_num == 3 && err_loop_index == 2) {
		for (; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				if (i == loop_i && j >= loop_j)
					break;

				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 3 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;
				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	} else if (mhop_num == 2 && err_loop_index == 1) {
		for (i -= 1; i >= 0; i--)
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
	} else {
		dev_warn(dev, "unsupported: mhop_num=%d, err_loop_index=%d.\n",
			 mhop_num, err_loop_index);
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0, mr->pbl_l0_dma_addr);
	mr->pbl_bt_l0 = NULL;
	mr->pbl_l0_dma_addr = 0;
}

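/*
 * The functions below manage the PBL (page buffer list) in its multi-hop
 * form: an L0 base-address table whose entries point to L1 tables, and,
 * for pbl_hop_num == 3, L1 entries that point to L2 tables; the last
 * level holds the page addresses themselves. hns_roce_loop_free() above
 * unwinds a partially built hierarchy when an allocation fails part-way
 * (err_loop_index identifies the level, loop_i/loop_j the position
 * reached).
 */
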
/* PBL multi hop addressing */
static int hns_roce_mhop_alloc(struct hns_roce_dev *hr_dev, int npages,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int mr_alloc_done = 0;
	int npages_allocated;
	int i = 0, j = 0;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 pbl_last_bt_num;
	u64 pbl_bt_cnt = 0;
	u64 bt_idx;
	u64 size;

	mhop_num = hr_dev->caps.pbl_hop_num;
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	pbl_last_bt_num = (npages + pbl_bt_sz / 8 - 1) / (pbl_bt_sz / 8);

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	/* hop_num = 1 */
	if (mhop_num == 1) {
		if (npages > pbl_bt_sz / 8) {
			dev_err(dev, "npages %d is larger than buf_pg_sz!\n",
				npages);
			return -EINVAL;
		}
		mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
						 &(mr->pbl_dma_addr),
						 GFP_KERNEL);
		if (!mr->pbl_buf)
			return -ENOMEM;

		mr->pbl_size = npages;
		mr->pbl_ba = mr->pbl_dma_addr;
		mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
		mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
		mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;
		return 0;
	}

	mr->pbl_l1_dma_addr = kcalloc(pbl_bt_sz / 8,
				      sizeof(*mr->pbl_l1_dma_addr),
				      GFP_KERNEL);
	if (!mr->pbl_l1_dma_addr)
		return -ENOMEM;

	mr->pbl_bt_l1 = kcalloc(pbl_bt_sz / 8, sizeof(*mr->pbl_bt_l1),
				GFP_KERNEL);
	if (!mr->pbl_bt_l1)
		goto err_kcalloc_bt_l1;

	if (mhop_num == 3) {
		mr->pbl_l2_dma_addr = kcalloc(pbl_last_bt_num,
					      sizeof(*mr->pbl_l2_dma_addr),
					      GFP_KERNEL);
		if (!mr->pbl_l2_dma_addr)
			goto err_kcalloc_l2_dma;

		mr->pbl_bt_l2 = kcalloc(pbl_last_bt_num,
					sizeof(*mr->pbl_bt_l2),
					GFP_KERNEL);
		if (!mr->pbl_bt_l2)
			goto err_kcalloc_bt_l2;
	}

	/* alloc L0 BT */
	mr->pbl_bt_l0 = dma_alloc_coherent(dev, pbl_bt_sz,
					   &(mr->pbl_l0_dma_addr),
					   GFP_KERNEL);
	if (!mr->pbl_bt_l0)
		goto err_dma_alloc_l0;

	if (mhop_num == 2) {
		/* alloc L1 BT */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
				size = pbl_bt_sz;
			} else {
				npages_allocated = i * (pbl_bt_sz / 8);
				size = (npages - npages_allocated) * 8;
			}
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, size,
					      &(mr->pbl_l1_dma_addr[i]),
					      GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			pbl_bt_cnt++;
			if (pbl_bt_cnt >= pbl_last_bt_num)
				break;
		}
	} else if (mhop_num == 3) {
		/* alloc L1, L2 BT */
		for (i = 0; i < pbl_bt_sz / 8; i++) {
			mr->pbl_bt_l1[i] = dma_alloc_coherent(dev, pbl_bt_sz,
					      &(mr->pbl_l1_dma_addr[i]),
					      GFP_KERNEL);
			if (!mr->pbl_bt_l1[i]) {
				hns_roce_loop_free(hr_dev, mr, 1, i, 0);
				goto err_dma_alloc_l0;
			}

			*(mr->pbl_bt_l0 + i) = mr->pbl_l1_dma_addr[i];

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * pbl_bt_sz / 8 + j;

				if (pbl_bt_cnt + 1 < pbl_last_bt_num) {
					size = pbl_bt_sz;
				} else {
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);
					size = (npages - npages_allocated) * 8;
				}
				mr->pbl_bt_l2[bt_idx] = dma_alloc_coherent(
					      dev, size,
					      &(mr->pbl_l2_dma_addr[bt_idx]),
					      GFP_KERNEL);
				if (!mr->pbl_bt_l2[bt_idx]) {
					hns_roce_loop_free(hr_dev, mr,
							   2, i, j);
					goto err_dma_alloc_l0;
				}

				*(mr->pbl_bt_l1[i] + j) =
						mr->pbl_l2_dma_addr[bt_idx];

				pbl_bt_cnt++;
				if (pbl_bt_cnt >= pbl_last_bt_num) {
					mr_alloc_done = 1;
					break;
				}
			}

			if (mr_alloc_done)
				break;
		}
	}

	mr->l0_chunk_last_num = i + 1;
	if (mhop_num == 3)
		mr->l1_chunk_last_num = j + 1;

	mr->pbl_size = npages;
	mr->pbl_ba = mr->pbl_l0_dma_addr;
	mr->pbl_hop_num = hr_dev->caps.pbl_hop_num;
	mr->pbl_ba_pg_sz = hr_dev->caps.pbl_ba_pg_sz;
	mr->pbl_buf_pg_sz = hr_dev->caps.pbl_buf_pg_sz;

	return 0;

err_dma_alloc_l0:
	kfree(mr->pbl_bt_l2);
	mr->pbl_bt_l2 = NULL;

err_kcalloc_bt_l2:
	kfree(mr->pbl_l2_dma_addr);
	mr->pbl_l2_dma_addr = NULL;

err_kcalloc_l2_dma:
	kfree(mr->pbl_bt_l1);
	mr->pbl_bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_l1_dma_addr = NULL;

	return -ENOMEM;
}

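/*
 * Capacity of the multi-hop layout: each BT page holds pbl_bt_sz / 8
 * eight-byte entries, so hop_num == 2 addresses up to (pbl_bt_sz / 8)^2
 * pages and hop_num == 3 up to (pbl_bt_sz / 8)^3. With a 4 KB BT page
 * (512 entries), two hops cover 512^2 = 256K pages, i.e. 1 GB of 4 KB
 * pages.
 */
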
static int hns_roce_mr_alloc(struct hns_roce_dev *hr_dev, u32 pd, u64 iova,
			     u64 size, u32 access, int npages,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	unsigned long index = 0;
	int ret = 0;

	/* Allocate a key for mr from mr_table */
	ret = hns_roce_bitmap_alloc(&hr_dev->mr_table.mtpt_bitmap, &index);
	if (ret == -1)
		return -ENOMEM;

	mr->iova = iova;			/* MR va starting addr */
	mr->size = size;			/* MR addr range */
	mr->pd = pd;				/* MR's PD */
	mr->access = access;			/* MR access permit */
	mr->enabled = 0;			/* MR active status */
	mr->key = hw_index_to_key(index);	/* MR key */

	/* Start from a clean slate so error paths can free safely */
	mr->pbl_buf = NULL;
	mr->pbl_dma_addr = 0;
	/* PBL multi-hop addressing parameters */
	mr->pbl_bt_l2 = NULL;
	mr->pbl_bt_l1 = NULL;
	mr->pbl_bt_l0 = NULL;
	mr->pbl_l2_dma_addr = NULL;
	mr->pbl_l1_dma_addr = NULL;
	mr->pbl_l0_dma_addr = 0;

	if (size == ~0ULL) {
		mr->type = MR_TYPE_DMA;
	} else {
		mr->type = MR_TYPE_MR;
		if (!hr_dev->caps.pbl_hop_num) {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf)
				return -ENOMEM;
		} else {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
		}
	}

	return ret;
}

static void hns_roce_mhop_free(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages_allocated;
	int npages;
	int i, j;
	u32 pbl_bt_sz;
	u32 mhop_num;
	u64 bt_idx;

	npages = ib_umem_page_count(mr->umem);
	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	mhop_num = hr_dev->caps.pbl_hop_num;

	if (mhop_num == HNS_ROCE_HOP_NUM_0)
		return;

	/* hop_num = 1 */
	if (mhop_num == 1) {
		dma_free_coherent(dev, (unsigned int)(npages * 8),
				  mr->pbl_buf, mr->pbl_dma_addr);
		return;
	}

	dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l0,
			  mr->pbl_l0_dma_addr);

	if (mhop_num == 2) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			if (i == mr->l0_chunk_last_num - 1) {
				npages_allocated = i * (pbl_bt_sz / 8);

				dma_free_coherent(dev,
					      (npages - npages_allocated) * 8,
					      mr->pbl_bt_l1[i],
					      mr->pbl_l1_dma_addr[i]);

				break;
			}

			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);
		}
	} else if (mhop_num == 3) {
		for (i = 0; i < mr->l0_chunk_last_num; i++) {
			dma_free_coherent(dev, pbl_bt_sz, mr->pbl_bt_l1[i],
					  mr->pbl_l1_dma_addr[i]);

			for (j = 0; j < pbl_bt_sz / 8; j++) {
				bt_idx = i * (pbl_bt_sz / 8) + j;

				if ((i == mr->l0_chunk_last_num - 1)
				    && j == mr->l1_chunk_last_num - 1) {
					npages_allocated = bt_idx *
							   (pbl_bt_sz / 8);

					dma_free_coherent(dev,
					      (npages - npages_allocated) * 8,
					      mr->pbl_bt_l2[bt_idx],
					      mr->pbl_l2_dma_addr[bt_idx]);

					break;
				}

				dma_free_coherent(dev, pbl_bt_sz,
						  mr->pbl_bt_l2[bt_idx],
						  mr->pbl_l2_dma_addr[bt_idx]);
			}
		}
	}

	kfree(mr->pbl_bt_l1);
	kfree(mr->pbl_l1_dma_addr);
	mr->pbl_bt_l1 = NULL;
	mr->pbl_l1_dma_addr = NULL;

	if (mhop_num == 3) {
		kfree(mr->pbl_bt_l2);
		kfree(mr->pbl_l2_dma_addr);
		mr->pbl_bt_l2 = NULL;
		mr->pbl_l2_dma_addr = NULL;
	}
}

static void hns_roce_mr_free(struct hns_roce_dev *hr_dev,
			     struct hns_roce_mr *mr)
{
	struct device *dev = hr_dev->dev;
	int npages = 0;
	int ret;

	if (mr->enabled) {
		ret = hns_roce_hw2sw_mpt(hr_dev, NULL, key_to_hw_index(mr->key)
					 & (hr_dev->caps.num_mtpts - 1));
		if (ret)
			dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);
	}

	if (mr->size != ~0ULL) {
		npages = ib_umem_page_count(mr->umem);

		if (!hr_dev->caps.pbl_hop_num)
			dma_free_coherent(dev, (unsigned int)(npages * 8),
					  mr->pbl_buf, mr->pbl_dma_addr);
		else
			hns_roce_mhop_free(hr_dev, mr);
	}

	hns_roce_table_put(hr_dev, &hr_dev->mr_table.mtpt_table,
			   key_to_hw_index(mr->key));

	hns_roce_bitmap_free(&hr_dev->mr_table.mtpt_bitmap,
			     key_to_hw_index(mr->key), BITMAP_NO_RR);
}

static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mr *mr)
{
	int ret;
	unsigned long mtpt_idx = key_to_hw_index(mr->key);
	struct device *dev = hr_dev->dev;
	struct hns_roce_cmd_mailbox *mailbox;
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	/* Prepare HEM entry memory */
	ret = hns_roce_table_get(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	if (ret)
		return ret;

	/* Allocate mailbox memory */
	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox)) {
		ret = PTR_ERR(mailbox);
		goto err_table;
	}

	ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
	if (ret) {
		dev_err(dev, "Write mtpt failed!\n");
		goto err_page;
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox,
				 mtpt_idx & (hr_dev->caps.num_mtpts - 1));
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto err_page;
	}

	mr->enabled = 1;
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

err_page:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

err_table:
	hns_roce_table_put(hr_dev, &mr_table->mtpt_table, mtpt_idx);
	return ret;
}

static int hns_roce_write_mtt_chunk(struct hns_roce_dev *hr_dev,
				    struct hns_roce_mtt *mtt, u32 start_index,
				    u32 npages, u64 *page_list)
{
	struct hns_roce_hem_table *table;
	dma_addr_t dma_handle;
	__le64 *mtts;
	u32 bt_page_size;
	u32 i;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
	else
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

	/* All MTTs must fit in the same page */
	if (start_index / (bt_page_size / sizeof(u64)) !=
	    (start_index + npages - 1) / (bt_page_size / sizeof(u64)))
		return -EINVAL;

	if (start_index & (HNS_ROCE_MTT_ENTRY_PER_SEG - 1))
		return -EINVAL;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		table = &hr_dev->mr_table.mtt_table;
	else
		table = &hr_dev->mr_table.mtt_cqe_table;

	mtts = hns_roce_table_find(hr_dev, table,
				   mtt->first_seg +
				   start_index / HNS_ROCE_MTT_ENTRY_PER_SEG,
				   &dma_handle);
	if (!mtts)
		return -ENOMEM;

	/* Save page addr, low 12 bits : 0 */
	for (i = 0; i < npages; ++i) {
		if (!hr_dev->caps.mtt_hop_num)
			mtts[i] = cpu_to_le64(page_list[i] >> PAGE_ADDR_SHIFT);
		else
			mtts[i] = cpu_to_le64(page_list[i]);
	}

	return 0;
}

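/*
 * A worked example of the same-page check above: with 4 KB BT pages,
 * bt_page_size / sizeof(u64) = 512 MTT entries share one page, so a
 * write covering entries 510..513 would straddle two pages and is
 * rejected with -EINVAL. hns_roce_write_mtt() below therefore splits its
 * work into chunks of at most 512 entries.
 */
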
static int hns_roce_write_mtt(struct hns_roce_dev *hr_dev,
			      struct hns_roce_mtt *mtt, u32 start_index,
			      u32 npages, u64 *page_list)
{
	int chunk;
	int ret;
	u32 bt_page_size;

	if (mtt->order < 0)
		return -EINVAL;

	if (mtt->mtt_type == MTT_TYPE_WQE)
		bt_page_size = 1 << (hr_dev->caps.mtt_ba_pg_sz + PAGE_SHIFT);
	else
		bt_page_size = 1 << (hr_dev->caps.cqe_ba_pg_sz + PAGE_SHIFT);

	while (npages > 0) {
		chunk = min_t(int, bt_page_size / sizeof(u64), npages);

		ret = hns_roce_write_mtt_chunk(hr_dev, mtt, start_index, chunk,
					       page_list);
		if (ret)
			return ret;

		npages -= chunk;
		start_index += chunk;
		page_list += chunk;
	}

	return 0;
}

int hns_roce_buf_write_mtt(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtt *mtt, struct hns_roce_buf *buf)
{
	u64 *page_list;
	int ret;
	u32 i;

	page_list = kmalloc_array(buf->npages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < buf->npages; ++i) {
		if (buf->nbufs == 1)
			page_list[i] = buf->direct.map + (i << buf->page_shift);
		else
			page_list[i] = buf->page_list[i].map;
	}

	ret = hns_roce_write_mtt(hr_dev, mtt, 0, buf->npages, page_list);

	kfree(page_list);

	return ret;
}

int hns_roce_init_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;
	int ret;

	ret = hns_roce_bitmap_init(&mr_table->mtpt_bitmap,
				   hr_dev->caps.num_mtpts,
				   hr_dev->caps.num_mtpts - 1,
				   hr_dev->caps.reserved_mrws, 0);
	if (ret)
		return ret;

	ret = hns_roce_buddy_init(&mr_table->mtt_buddy,
				  ilog2(hr_dev->caps.num_mtt_segs));
	if (ret)
		goto err_buddy;

	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE)) {
		ret = hns_roce_buddy_init(&mr_table->mtt_cqe_buddy,
					  ilog2(hr_dev->caps.num_cqe_segs));
		if (ret)
			goto err_buddy_cqe;
	}

	return 0;

err_buddy_cqe:
	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);

err_buddy:
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
	return ret;
}

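/*
 * Sizing note: ilog2(num_mtt_segs) is used as max_order, so each buddy
 * starts with a single free block spanning all segments; this assumes
 * num_mtt_segs (and num_cqe_segs) are powers of two, which the driver's
 * capability defaults appear to satisfy.
 */
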
void hns_roce_cleanup_mr_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_mr_table *mr_table = &hr_dev->mr_table;

	hns_roce_buddy_cleanup(&mr_table->mtt_buddy);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_buddy_cleanup(&mr_table->mtt_cqe_buddy);
	hns_roce_bitmap_cleanup(&mr_table->mtpt_bitmap);
}

struct ib_mr *hns_roce_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct hns_roce_mr *mr;
	int ret;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	/* Allocate memory region key */
	ret = hns_roce_mr_alloc(to_hr_dev(pd->device), to_hr_pd(pd)->pdn, 0,
				~0ULL, acc, 0, mr);
	if (ret)
		goto err_free;

	ret = hns_roce_mr_enable(to_hr_dev(pd->device), mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;
	mr->umem = NULL;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(to_hr_dev(pd->device), mr);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

int hns_roce_ib_umem_write_mtt(struct hns_roce_dev *hr_dev,
			       struct hns_roce_mtt *mtt, struct ib_umem *umem)
{
	struct device *dev = hr_dev->dev;
	struct scatterlist *sg;
	unsigned int order;
	int i, k, entry;
	int npage = 0;
	int ret = 0;
	int len;
	u64 page_addr;
	u64 *pages;
	u32 bt_page_size;
	u32 n;

	order = mtt->mtt_type == MTT_TYPE_WQE ? hr_dev->caps.mtt_ba_pg_sz :
		hr_dev->caps.cqe_ba_pg_sz;
	bt_page_size = 1 << (order + PAGE_SHIFT);

	pages = (u64 *) __get_free_pages(GFP_KERNEL, order);
	if (!pages)
		return -ENOMEM;

	i = n = 0;

	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr =
				sg_dma_address(sg) + (k << umem->page_shift);
			if (!(npage % (1 << (mtt->page_shift - PAGE_SHIFT)))) {
				if (page_addr & ((1 << mtt->page_shift) - 1)) {
					dev_err(dev, "page_addr 0x%llx is not aligned to page_shift %d!\n",
						page_addr, mtt->page_shift);
					ret = -EINVAL;
					goto out;
				}
				pages[i++] = page_addr;
			}
			npage++;
			if (i == bt_page_size / sizeof(u64)) {
				ret = hns_roce_write_mtt(hr_dev, mtt, n, i,
							 pages);
				if (ret)
					goto out;
				n += i;
				i = 0;
			}
		}
	}

	if (i)
		ret = hns_roce_write_mtt(hr_dev, mtt, n, i, pages);

out:
	free_pages((unsigned long) pages, order);
	return ret;
}

static int hns_roce_ib_umem_write_mr(struct hns_roce_dev *hr_dev,
				     struct hns_roce_mr *mr,
				     struct ib_umem *umem)
{
	struct scatterlist *sg;
	int i = 0, j = 0, k;
	int entry;
	int len;
	u64 page_addr;
	u32 pbl_bt_sz;

	if (hr_dev->caps.pbl_hop_num == HNS_ROCE_HOP_NUM_0)
		return 0;

	pbl_bt_sz = 1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT);
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (k = 0; k < len; ++k) {
			page_addr = sg_dma_address(sg) +
				    (k << umem->page_shift);

			if (!hr_dev->caps.pbl_hop_num) {
				/* Low 12 bits dropped, as in the MTT */
				mr->pbl_buf[i++] = page_addr >> 12;
			} else if (hr_dev->caps.pbl_hop_num == 1) {
				mr->pbl_buf[i++] = page_addr;
			} else {
				if (hr_dev->caps.pbl_hop_num == 2)
					mr->pbl_bt_l1[i][j] = page_addr;
				else if (hr_dev->caps.pbl_hop_num == 3)
					mr->pbl_bt_l2[i][j] = page_addr;

				j++;
				if (j >= (pbl_bt_sz / 8)) {
					i++;
					j = 0;
				}
			}
		}
	}

	return 0;
}

struct ib_mr *hns_roce_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				   u64 virt_addr, int access_flags,
				   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
	struct device *dev = hr_dev->dev;
	struct hns_roce_mr *mr;
	int bt_size;
	int ret;
	int n;
	int i;

	mr = kmalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->umem = ib_umem_get(pd->uobject->context, start, length,
			       access_flags, 0);
	if (IS_ERR(mr->umem)) {
		ret = PTR_ERR(mr->umem);
		goto err_free;
	}

	n = ib_umem_page_count(mr->umem);

	if (!hr_dev->caps.pbl_hop_num) {
		if (n > HNS_ROCE_MAX_MTPT_PBL_NUM) {
			dev_err(dev,
				"MR len %lld err. MR is limited to 4G at most!\n",
				length);
			ret = -EINVAL;
			goto err_umem;
		}
	} else {
		u64 pbl_size = 1;

		bt_size = (1 << (hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT)) / 8;
		for (i = 0; i < hr_dev->caps.pbl_hop_num; i++)
			pbl_size *= bt_size;
		if (n > pbl_size) {
			dev_err(dev,
				"MR len %lld err. MR page num is limited to %lld!\n",
				length, pbl_size);
			ret = -EINVAL;
			goto err_umem;
		}
	}

	ret = hns_roce_mr_alloc(hr_dev, to_hr_pd(pd)->pdn, virt_addr, length,
				access_flags, n, mr);
	if (ret)
		goto err_umem;

	ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
	if (ret)
		goto err_mr;

	ret = hns_roce_mr_enable(hr_dev, mr);
	if (ret)
		goto err_mr;

	mr->ibmr.rkey = mr->ibmr.lkey = mr->key;

	return &mr->ibmr;

err_mr:
	hns_roce_mr_free(hr_dev, mr);

err_umem:
	ib_umem_release(mr->umem);

err_free:
	kfree(mr);
	return ERR_PTR(ret);
}

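/*
 * On the limits checked above: a flat PBL caps the page count at
 * HNS_ROCE_MAX_MTPT_PBL_NUM, which with 4 KB pages corresponds to the
 * "4G at most" message; with multi-hop PBL the bound is bt_size raised
 * to pbl_hop_num, since each hop multiplies capacity by the number of
 * entries per base-address table.
 */
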
int hns_roce_rereg_user_mr(struct ib_mr *ibmr, int flags, u64 start, u64 length,
			   u64 virt_addr, int mr_access_flags, struct ib_pd *pd,
			   struct ib_udata *udata)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	struct hns_roce_cmd_mailbox *mailbox;
	struct device *dev = hr_dev->dev;
	unsigned long mtpt_idx;
	u32 pdn = 0;
	int npages;
	int ret;

	if (!mr->enabled)
		return -EINVAL;

	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	mtpt_idx = key_to_hw_index(mr->key) & (hr_dev->caps.num_mtpts - 1);
	ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, mtpt_idx, 0,
				HNS_ROCE_CMD_QUERY_MPT,
				HNS_ROCE_CMD_TIMEOUT_MSECS);
	if (ret)
		goto free_cmd_mbox;

	ret = hns_roce_hw2sw_mpt(hr_dev, NULL, mtpt_idx);
	if (ret)
		dev_warn(dev, "HW2SW_MPT failed (%d)\n", ret);

	mr->enabled = 0;

	if (flags & IB_MR_REREG_PD)
		pdn = to_hr_pd(pd)->pdn;

	if (flags & IB_MR_REREG_TRANS) {
		if (mr->size != ~0ULL) {
			npages = ib_umem_page_count(mr->umem);

			if (hr_dev->caps.pbl_hop_num)
				hns_roce_mhop_free(hr_dev, mr);
			else
				dma_free_coherent(dev, npages * 8, mr->pbl_buf,
						  mr->pbl_dma_addr);
		}
		ib_umem_release(mr->umem);

		mr->umem = ib_umem_get(ibmr->uobject->context, start, length,
				       mr_access_flags, 0);
		if (IS_ERR(mr->umem)) {
			ret = PTR_ERR(mr->umem);
			mr->umem = NULL;
			goto free_cmd_mbox;
		}
		npages = ib_umem_page_count(mr->umem);

		if (hr_dev->caps.pbl_hop_num) {
			ret = hns_roce_mhop_alloc(hr_dev, npages, mr);
			if (ret)
				goto release_umem;
		} else {
			mr->pbl_buf = dma_alloc_coherent(dev, npages * 8,
							 &(mr->pbl_dma_addr),
							 GFP_KERNEL);
			if (!mr->pbl_buf) {
				ret = -ENOMEM;
				goto release_umem;
			}
		}
	}

	ret = hr_dev->hw->rereg_write_mtpt(hr_dev, mr, flags, pdn,
					   mr_access_flags, virt_addr,
					   length, mailbox->buf);
	if (ret) {
		if (flags & IB_MR_REREG_TRANS)
			goto release_umem;
		else
			goto free_cmd_mbox;
	}

	if (flags & IB_MR_REREG_TRANS) {
		ret = hns_roce_ib_umem_write_mr(hr_dev, mr, mr->umem);
		if (ret) {
			if (mr->size != ~0ULL) {
				npages = ib_umem_page_count(mr->umem);

				if (hr_dev->caps.pbl_hop_num)
					hns_roce_mhop_free(hr_dev, mr);
				else
					dma_free_coherent(dev, npages * 8,
							  mr->pbl_buf,
							  mr->pbl_dma_addr);
			}
			goto release_umem;
		}
	}

	ret = hns_roce_sw2hw_mpt(hr_dev, mailbox, mtpt_idx);
	if (ret) {
		dev_err(dev, "SW2HW_MPT failed (%d)\n", ret);
		goto release_umem;
	}

	mr->enabled = 1;
	if (flags & IB_MR_REREG_ACCESS)
		mr->access = mr_access_flags;

	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return 0;

release_umem:
	ib_umem_release(mr->umem);

free_cmd_mbox:
	hns_roce_free_cmd_mailbox(hr_dev, mailbox);

	return ret;
}

int hns_roce_dereg_mr(struct ib_mr *ibmr)
{
	struct hns_roce_dev *hr_dev = to_hr_dev(ibmr->device);
	struct hns_roce_mr *mr = to_hr_mr(ibmr);
	int ret = 0;

	if (hr_dev->hw->dereg_mr) {
		ret = hr_dev->hw->dereg_mr(hr_dev, mr);
	} else {
		hns_roce_mr_free(hr_dev, mr);

		if (mr->umem)
			ib_umem_release(mr->umem);

		kfree(mr);
	}

	return ret;
}