/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 * Copyright (c) 2013-2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_LEN		(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_LEN		(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_LEN + ISER_MAX_TX_LEN + \
				 ISCSI_ISER_MAX_CONN)
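/*
 * Sizing note: each CQ is shared by up to ISCSI_ISER_MAX_CONN connections,
 * so it must be able to absorb every receive and send work request those
 * connections can have outstanding at once (ISER_MAX_RX_LEN +
 * ISER_MAX_TX_LEN). The extra ISCSI_ISER_MAX_CONN entries presumably leave
 * room for one teardown beacon WR per connection (see iser_conn_terminate()
 * below).
 */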
static int iser_cq_poll_limit = 512;
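/*
 * iser_cq_poll_limit bounds how many completions a single tasklet run may
 * reap in iser_cq_tasklet_fn() below, so one busy CQ cannot monopolize the
 * soft-IRQ context indefinitely.
 */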
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
    iser_err("cq event %s (%d)\n",
             ib_event_msg(cause->event), cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
    iser_err("qp event %s (%d)\n",
             ib_event_msg(cause->event), cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
                               struct ib_event *event)
{
    iser_err("async event %s (%d) on device %s port %d\n",
             ib_event_msg(event->event), event->event,
             event->device->name, event->element.port_num);
}
/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adaptor.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
    struct ib_device_attr *dev_attr = &device->dev_attr;
    int ret, i, max_cqe;

    ret = ib_query_device(device->ib_device, dev_attr);
    if (ret) {
        pr_warn("Query device failed for %s\n", device->ib_device->name);
        return ret;
    }

    ret = iser_assign_reg_ops(device);
    if (ret)
        return ret;

    device->comps_used = min_t(int, num_online_cpus(),
                               device->ib_device->num_comp_vectors);

    device->comps = kcalloc(device->comps_used, sizeof(*device->comps),
                            GFP_KERNEL);
    if (!device->comps)
        goto comps_err;

    max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);

    iser_info("using %d CQs, device %s supports %d vectors max_cqe %d\n",
              device->comps_used, device->ib_device->name,
              device->ib_device->num_comp_vectors, max_cqe);

    device->pd = ib_alloc_pd(device->ib_device);
    if (IS_ERR(device->pd))
        goto pd_err;

    for (i = 0; i < device->comps_used; i++) {
        struct ib_cq_init_attr cq_attr = {};
        struct iser_comp *comp = &device->comps[i];

        comp->device = device;
        cq_attr.cqe = max_cqe;
        cq_attr.comp_vector = i;
        comp->cq = ib_create_cq(device->ib_device,
                                iser_cq_callback,
                                iser_cq_event_callback,
                                (void *)comp,
                                &cq_attr);
        if (IS_ERR(comp->cq)) {
            comp->cq = NULL;
            goto cq_err;
        }

        if (ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP))
            goto cq_err;

        tasklet_init(&comp->tasklet, iser_cq_tasklet_fn,
                     (unsigned long)comp);
    }

    if (!iser_always_reg) {
        int access = IB_ACCESS_LOCAL_WRITE |
                     IB_ACCESS_REMOTE_WRITE |
                     IB_ACCESS_REMOTE_READ;

        device->mr = ib_get_dma_mr(device->pd, access);
        if (IS_ERR(device->mr))
            goto dma_mr_err;
    }

    INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
                          iser_event_handler);
    if (ib_register_event_handler(&device->event_handler))
        goto handler_err;

    return 0;

handler_err:
    if (device->mr)
        ib_dereg_mr(device->mr);
dma_mr_err:
    for (i = 0; i < device->comps_used; i++)
        tasklet_kill(&device->comps[i].tasklet);
cq_err:
    for (i = 0; i < device->comps_used; i++) {
        struct iser_comp *comp = &device->comps[i];

        if (comp->cq)
            ib_destroy_cq(comp->cq);
    }
    ib_dealloc_pd(device->pd);
pd_err:
    kfree(device->comps);
comps_err:
    iser_err("failed to allocate an IB resource\n");
    return -1;
}
/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adaptor.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
    int i;

    for (i = 0; i < device->comps_used; i++) {
        struct iser_comp *comp = &device->comps[i];

        tasklet_kill(&comp->tasklet);
        ib_destroy_cq(comp->cq);
        comp->cq = NULL;
    }

    (void)ib_unregister_event_handler(&device->event_handler);
    if (device->mr)
        (void)ib_dereg_mr(device->mr);
    ib_dealloc_pd(device->pd);

    kfree(device->comps);
    device->comps = NULL;
}
/**
 * iser_alloc_fmr_pool - Creates FMR pool and page_vector
 *
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fmr_pool(struct ib_conn *ib_conn,
                        unsigned cmds_max,
                        unsigned int size)
{
    struct iser_device *device = ib_conn->device;
    struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
    struct iser_page_vec *page_vec;
    struct iser_fr_desc *desc;
    struct ib_fmr_pool *fmr_pool;
    struct ib_fmr_pool_param params;
    int ret;

    INIT_LIST_HEAD(&fr_pool->list);
    spin_lock_init(&fr_pool->lock);

    desc = kzalloc(sizeof(*desc), GFP_KERNEL);
    if (!desc)
        return -ENOMEM;

    page_vec = kmalloc(sizeof(*page_vec) + (sizeof(u64) * size),
                       GFP_KERNEL);
    if (!page_vec) {
        ret = -ENOMEM;
        goto err_frpl;
    }

    page_vec->pages = (u64 *)(page_vec + 1);

    params.page_shift = SHIFT_4K;
    params.max_pages_per_fmr = size;
    /* make the pool size twice the max number of SCSI commands *
     * the mid-layer is expected to queue, watermark for unmap at 50% */
    params.pool_size = cmds_max * 2;
    params.dirty_watermark = cmds_max;
    params.cache = 0;
    params.flush_function = NULL;
    params.access = (IB_ACCESS_LOCAL_WRITE |
                     IB_ACCESS_REMOTE_WRITE |
                     IB_ACCESS_REMOTE_READ);

    fmr_pool = ib_create_fmr_pool(device->pd, &params);
    if (IS_ERR(fmr_pool)) {
        ret = PTR_ERR(fmr_pool);
        iser_err("FMR allocation failed, err %d\n", ret);
        goto err_fmr;
    }

    desc->rsc.page_vec = page_vec;
    desc->rsc.fmr_pool = fmr_pool;
    list_add(&desc->list, &fr_pool->list);

    return 0;

err_fmr:
    kfree(page_vec);
err_frpl:
    kfree(desc);

    return ret;
}
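/*
 * Sizing example for the pool created above: with cmds_max = 128, the pool
 * is built with 256 FMRs and the dirty watermark of 128 means a flush
 * (unmap) is triggered once half of the pool is waiting to be invalidated.
 */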
/**
 * iser_free_fmr_pool - releases the FMR pool and page vec
 */
void iser_free_fmr_pool(struct ib_conn *ib_conn)
{
    struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
    struct iser_fr_desc *desc;

    desc = list_first_entry(&fr_pool->list,
                            struct iser_fr_desc, list);
    list_del(&desc->list);

    iser_info("freeing conn %p fmr pool %p\n",
              ib_conn, desc->rsc.fmr_pool);

    ib_destroy_fmr_pool(desc->rsc.fmr_pool);
    kfree(desc->rsc.page_vec);
    kfree(desc);
}
static int
iser_alloc_reg_res(struct ib_device *ib_device,
                   struct ib_pd *pd,
                   struct iser_reg_resources *res,
                   unsigned int size)
{
    int ret;

    res->mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, size);
    if (IS_ERR(res->mr)) {
        ret = PTR_ERR(res->mr);
        iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
        return ret;
    }
    res->mr_valid = 1;

    return 0;
}

static void
iser_free_reg_res(struct iser_reg_resources *rsc)
{
    ib_dereg_mr(rsc->mr);
}
static int
iser_alloc_pi_ctx(struct ib_device *ib_device,
                  struct ib_pd *pd,
                  struct iser_fr_desc *desc,
                  unsigned int size)
{
    struct iser_pi_context *pi_ctx = NULL;
    int ret;

    desc->pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
    if (!desc->pi_ctx)
        return -ENOMEM;

    pi_ctx = desc->pi_ctx;

    ret = iser_alloc_reg_res(ib_device, pd, &pi_ctx->rsc, size);
    if (ret) {
        iser_err("failed to allocate reg_resources\n");
        goto alloc_reg_res_err;
    }

    pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
    if (IS_ERR(pi_ctx->sig_mr)) {
        ret = PTR_ERR(pi_ctx->sig_mr);
        goto sig_mr_failure;
    }
    pi_ctx->sig_mr_valid = 1;
    desc->pi_ctx->sig_protected = 0;

    return 0;

sig_mr_failure:
    iser_free_reg_res(&pi_ctx->rsc);
alloc_reg_res_err:
    kfree(desc->pi_ctx);

    return ret;
}

static void
iser_free_pi_ctx(struct iser_pi_context *pi_ctx)
{
    iser_free_reg_res(&pi_ctx->rsc);
    ib_dereg_mr(pi_ctx->sig_mr);
    kfree(pi_ctx);
}
static struct iser_fr_desc *
iser_create_fastreg_desc(struct ib_device *ib_device,
                         struct ib_pd *pd,
                         bool pi_enable,
                         unsigned int size)
{
    struct iser_fr_desc *desc;
    int ret;

    desc = kzalloc(sizeof(*desc), GFP_KERNEL);
    if (!desc)
        return ERR_PTR(-ENOMEM);

    ret = iser_alloc_reg_res(ib_device, pd, &desc->rsc, size);
    if (ret)
        goto reg_res_alloc_failure;

    if (pi_enable) {
        ret = iser_alloc_pi_ctx(ib_device, pd, desc, size);
        if (ret)
            goto pi_ctx_alloc_failure;
    }

    return desc;

pi_ctx_alloc_failure:
    iser_free_reg_res(&desc->rsc);
reg_res_alloc_failure:
    kfree(desc);

    return ERR_PTR(ret);
}
/**
 * iser_alloc_fastreg_pool - Creates pool of fast_reg descriptors
 * for fast registration work requests.
 *
 * returns 0 on success, or errno code on failure
 */
int iser_alloc_fastreg_pool(struct ib_conn *ib_conn,
                            unsigned cmds_max,
                            unsigned int size)
{
    struct iser_device *device = ib_conn->device;
    struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
    struct iser_fr_desc *desc;
    int i, ret;

    INIT_LIST_HEAD(&fr_pool->list);
    INIT_LIST_HEAD(&fr_pool->all_list);
    spin_lock_init(&fr_pool->lock);
    fr_pool->size = 0;
    for (i = 0; i < cmds_max; i++) {
        desc = iser_create_fastreg_desc(device->ib_device, device->pd,
                                        ib_conn->pi_support, size);
        if (IS_ERR(desc)) {
            ret = PTR_ERR(desc);
            goto err;
        }

        list_add_tail(&desc->list, &fr_pool->list);
        list_add_tail(&desc->all_list, &fr_pool->all_list);
        fr_pool->size++;
    }

    return 0;

err:
    iser_free_fastreg_pool(ib_conn);
    return ret;
}
/**
 * iser_free_fastreg_pool - releases the pool of fast_reg descriptors
 */
void iser_free_fastreg_pool(struct ib_conn *ib_conn)
{
    struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
    struct iser_fr_desc *desc, *tmp;
    int i = 0;

    if (list_empty(&fr_pool->all_list))
        return;

    iser_info("freeing conn %p fr pool\n", ib_conn);

    list_for_each_entry_safe(desc, tmp, &fr_pool->all_list, all_list) {
        list_del(&desc->all_list);
        iser_free_reg_res(&desc->rsc);
        if (desc->pi_ctx)
            iser_free_pi_ctx(desc->pi_ctx);
        kfree(desc);
        ++i;
    }

    if (i < fr_pool->size)
        iser_warn("pool still has %d regions registered\n",
                  fr_pool->size - i);
}
/**
 * iser_create_ib_conn_res - creates a Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
{
    struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
                                               ib_conn);
    struct iser_device *device;
    struct ib_device_attr *dev_attr;
    struct ib_qp_init_attr init_attr;
    int ret = -ENOMEM;
    int index, min_index = 0;

    BUG_ON(ib_conn->device == NULL);

    device = ib_conn->device;
    dev_attr = &device->dev_attr;

    memset(&init_attr, 0, sizeof init_attr);

    mutex_lock(&ig.connlist_mutex);
    /* select the CQ with the minimal number of usages */
    for (index = 0; index < device->comps_used; index++)
        if (device->comps[index].active_qps <
            device->comps[min_index].active_qps)
            min_index = index;
    ib_conn->comp = &device->comps[min_index];
    ib_conn->comp->active_qps++;
    mutex_unlock(&ig.connlist_mutex);
    iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn);

    init_attr.event_handler = iser_qp_event_callback;
    init_attr.qp_context = (void *)ib_conn;
    init_attr.send_cq = ib_conn->comp->cq;
    init_attr.recv_cq = ib_conn->comp->cq;
    init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
    init_attr.cap.max_send_sge = 2;
    init_attr.cap.max_recv_sge = 1;
    init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
    init_attr.qp_type = IB_QPT_RC;
    if (ib_conn->pi_support) {
        init_attr.cap.max_send_wr = ISER_QP_SIG_MAX_REQ_DTOS + 1;
        init_attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
        iser_conn->max_cmds =
            ISER_GET_MAX_XMIT_CMDS(ISER_QP_SIG_MAX_REQ_DTOS);
    } else {
        if (dev_attr->max_qp_wr > ISER_QP_MAX_REQ_DTOS) {
            init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS + 1;
            iser_conn->max_cmds =
                ISER_GET_MAX_XMIT_CMDS(ISER_QP_MAX_REQ_DTOS);
        } else {
            init_attr.cap.max_send_wr = dev_attr->max_qp_wr;
            iser_conn->max_cmds =
                ISER_GET_MAX_XMIT_CMDS(dev_attr->max_qp_wr);
            iser_dbg("device %s supports max_send_wr %d\n",
                     device->ib_device->name, dev_attr->max_qp_wr);
        }
    }

    ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
    if (ret)
        goto out_err;

    ib_conn->qp = ib_conn->cma_id->qp;
    iser_info("setting conn %p cma_id %p qp %p\n",
              ib_conn, ib_conn->cma_id,
              ib_conn->cma_id->qp);
    return ret;

out_err:
    mutex_lock(&ig.connlist_mutex);
    ib_conn->comp->active_qps--;
    mutex_unlock(&ig.connlist_mutex);
    iser_err("unable to alloc mem or create resource, err %d\n", ret);

    return ret;
}
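/*
 * Note on the send queue sizing above: max_send_wr is one entry beyond the
 * DTO budget; the spare slot leaves room for the beacon send WR that
 * iser_conn_terminate() posts to drain the send queue at teardown.
 */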
/*
 * Based on the resolved device's node GUID, see if an iser_device was
 * already allocated for this IB device. If there's no such device,
 * create one.
 */
static
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
    struct iser_device *device;

    mutex_lock(&ig.device_list_mutex);

    list_for_each_entry(device, &ig.device_list, ig_list)
        /* find if there's a match using the node GUID */
        if (device->ib_device->node_guid == cma_id->device->node_guid)
            goto inc_refcnt;

    device = kzalloc(sizeof *device, GFP_KERNEL);
    if (device == NULL)
        goto out;

    /* attach the IB device to the new iser device */
    device->ib_device = cma_id->device;
    /* init the device and link it into ig device list */
    if (iser_create_device_ib_res(device)) {
        kfree(device);
        device = NULL;
        goto out;
    }
    list_add(&device->ig_list, &ig.device_list);

inc_refcnt:
    device->refcount++;
out:
    mutex_unlock(&ig.device_list_mutex);
    return device;
}
/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
    mutex_lock(&ig.device_list_mutex);
    device->refcount--;
    iser_info("device %p refcount %d\n", device, device->refcount);
    if (!device->refcount) {
        iser_free_device_ib_res(device);
        list_del(&device->ig_list);
        kfree(device);
    }
    mutex_unlock(&ig.device_list_mutex);
}
/**
 * Called with state mutex held
 */
static int iser_conn_state_comp_exch(struct iser_conn *iser_conn,
                                     enum iser_conn_state comp,
                                     enum iser_conn_state exch)
{
    int ret;

    ret = (iser_conn->state == comp);
    if (ret)
        iser_conn->state = exch;

    return ret;
}
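/*
 * This is a plain (non-atomic) compare-and-exchange on the connection
 * state; it is safe only because every caller holds state_mutex.
 * Typical use, as in iser_conn_terminate() below, makes a state
 * transition happen exactly once:
 *
 *	if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
 *				       ISER_CONN_TERMINATING))
 *		return 0;
 */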
void iser_release_work(struct work_struct *work)
{
    struct iser_conn *iser_conn;

    iser_conn = container_of(work, struct iser_conn, release_work);

    /* Wait for conn_stop to complete */
    wait_for_completion(&iser_conn->stop_completion);
    /* Wait for IB resources cleanup to complete */
    wait_for_completion(&iser_conn->ib_completion);

    mutex_lock(&iser_conn->state_mutex);
    iser_conn->state = ISER_CONN_DOWN;
    mutex_unlock(&iser_conn->state_mutex);

    iser_conn_release(iser_conn);
}
/**
 * iser_free_ib_conn_res - release IB related resources
 * @iser_conn: iser connection struct
 * @destroy: indicator if we need to try to release the
 *     iser device and memory regions pool (only iscsi
 *     shutdown and DEVICE_REMOVAL will use this).
 *
 * This routine is called with the iser state mutex held
 * so the cm_id removal is out of here. It is safe to
 * be invoked multiple times.
 */
static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
                                  bool destroy)
{
    struct ib_conn *ib_conn = &iser_conn->ib_conn;
    struct iser_device *device = ib_conn->device;

    iser_info("freeing conn %p cma_id %p qp %p\n",
              iser_conn, ib_conn->cma_id, ib_conn->qp);

    if (ib_conn->qp != NULL) {
        ib_conn->comp->active_qps--;
        rdma_destroy_qp(ib_conn->cma_id);
        ib_conn->qp = NULL;
    }

    if (destroy) {
        if (iser_conn->rx_descs)
            iser_free_rx_descriptors(iser_conn);

        if (device != NULL) {
            iser_device_try_release(device);
            ib_conn->device = NULL;
        }
    }
}
/**
 * Frees all conn objects and deallocs conn descriptor
 */
void iser_conn_release(struct iser_conn *iser_conn)
{
    struct ib_conn *ib_conn = &iser_conn->ib_conn;

    mutex_lock(&ig.connlist_mutex);
    list_del(&iser_conn->conn_list);
    mutex_unlock(&ig.connlist_mutex);

    mutex_lock(&iser_conn->state_mutex);
    /* In case we end up here without ep_disconnect being invoked. */
    if (iser_conn->state != ISER_CONN_DOWN) {
        iser_warn("iser conn %p state %d, expected state down.\n",
                  iser_conn, iser_conn->state);
        iscsi_destroy_endpoint(iser_conn->ep);
        iser_conn->state = ISER_CONN_DOWN;
    }
    /*
     * In case we never got to bind stage, we still need to
     * release IB resources (which is safe to call more than once).
     */
    iser_free_ib_conn_res(iser_conn, true);
    mutex_unlock(&iser_conn->state_mutex);

    if (ib_conn->cma_id != NULL) {
        rdma_destroy_id(ib_conn->cma_id);
        ib_conn->cma_id = NULL;
    }

    kfree(iser_conn);
}
/**
 * Triggers the start of the disconnect procedures and waits for them
 * to be done.
 * Called with state mutex held
 */
int iser_conn_terminate(struct iser_conn *iser_conn)
{
    struct ib_conn *ib_conn = &iser_conn->ib_conn;
    struct ib_send_wr *bad_wr;
    int err = 0;

    /* terminate the iser conn only if the conn state is UP */
    if (!iser_conn_state_comp_exch(iser_conn, ISER_CONN_UP,
                                   ISER_CONN_TERMINATING))
        return 0;

    iser_info("iser_conn %p state %d\n", iser_conn, iser_conn->state);

    /* suspend queuing of new iscsi commands */
    if (iser_conn->iscsi_conn)
        iscsi_suspend_queue(iser_conn->iscsi_conn);

    /*
     * In case we didn't already clean up the cma_id (peer initiated
     * a disconnection), we need to cause the CMA to change the QP
     * state to ERROR.
     */
    if (ib_conn->cma_id) {
        err = rdma_disconnect(ib_conn->cma_id);
        if (err)
            iser_err("Failed to disconnect, conn: 0x%p err %d\n",
                     iser_conn, err);

        /* post an indication that all flush errors were consumed */
        err = ib_post_send(ib_conn->qp, &ib_conn->beacon, &bad_wr);
        if (err) {
            iser_err("conn %p failed to post beacon", ib_conn);
            return 1;
        }

        wait_for_completion(&ib_conn->flush_comp);
    }

    return 1;
}
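/*
 * The beacon pattern above: once rdma_disconnect() has moved the QP to the
 * error state, every outstanding WR completes with a flush error in FIFO
 * order. Posting one more send WR tagged ISER_BEACON_WRID and waiting for
 * its flush completion (matched in iser_handle_wc() below) therefore
 * guarantees that all earlier flush errors have already been consumed.
 */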
/**
 * Called with state mutex held
 */
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
    struct iser_conn *iser_conn;

    iser_conn = (struct iser_conn *)cma_id->context;
    iser_conn->state = ISER_CONN_TERMINATING;
}
static void
iser_calc_scsi_params(struct iser_conn *iser_conn,
                      unsigned int max_sectors)
{
    struct iser_device *device = iser_conn->ib_conn.device;
    unsigned short sg_tablesize, sup_sg_tablesize;

    sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K);
    sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
                             device->dev_attr.max_fast_reg_page_list_len);

    if (sg_tablesize > sup_sg_tablesize) {
        sg_tablesize = sup_sg_tablesize;
        iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
    } else {
        iser_conn->scsi_max_sectors = max_sectors;
    }

    iser_conn->scsi_sg_tablesize = sg_tablesize;

    iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
             iser_conn, iser_conn->scsi_sg_tablesize,
             iser_conn->scsi_max_sectors);
}
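/*
 * Worked example for the math above: max_sectors = 1024 (512 KB transfers)
 * gives sg_tablesize = DIV_ROUND_UP(1024 * 512, 4096) = 128 4K pages. If
 * the device supports only, say, 64 pages per fast registration, then
 * sg_tablesize drops to 64 and scsi_max_sectors scales back to
 * 64 * 4096 / 512 = 512.
 */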
/**
 * Called with state mutex held
 */
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
    struct iser_device *device;
    struct iser_conn *iser_conn;
    struct ib_conn *ib_conn;
    int ret;

    iser_conn = (struct iser_conn *)cma_id->context;
    if (iser_conn->state != ISER_CONN_PENDING)
        /* bailout */
        return;

    ib_conn = &iser_conn->ib_conn;
    device = iser_device_find_by_ib_device(cma_id);
    if (!device) {
        iser_err("device lookup/creation failed\n");
        iser_connect_error(cma_id);
        return;
    }

    ib_conn->device = device;

    /* connection T10-PI support */
    if (iser_pi_enable) {
        if (!(device->dev_attr.device_cap_flags &
              IB_DEVICE_SIGNATURE_HANDOVER)) {
            iser_warn("T10-PI requested but not supported on %s, "
                      "continue without T10-PI\n",
                      ib_conn->device->ib_device->name);
            ib_conn->pi_support = false;
        } else {
            ib_conn->pi_support = true;
        }
    }

    iser_calc_scsi_params(iser_conn, iser_max_sectors);

    ret = rdma_resolve_route(cma_id, 1000);
    if (ret) {
        iser_err("resolve route failed: %d\n", ret);
        iser_connect_error(cma_id);
    }
}
/**
 * Called with state mutex held
 */
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
    struct rdma_conn_param conn_param;
    int ret;
    struct iser_cm_hdr req_hdr;
    struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
    struct ib_conn *ib_conn = &iser_conn->ib_conn;
    struct iser_device *device = ib_conn->device;

    if (iser_conn->state != ISER_CONN_PENDING)
        /* bailout */
        return;

    ret = iser_create_ib_conn_res(ib_conn);
    if (ret)
        goto failure;

    memset(&conn_param, 0, sizeof conn_param);
    conn_param.responder_resources = device->dev_attr.max_qp_rd_atom;
    conn_param.initiator_depth = 1;
    conn_param.retry_count = 7;
    conn_param.rnr_retry_count = 6;

    memset(&req_hdr, 0, sizeof(req_hdr));
    req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED |
                     ISER_SEND_W_INV_NOT_SUPPORTED);
    conn_param.private_data = (void *)&req_hdr;
    conn_param.private_data_len = sizeof(struct iser_cm_hdr);

    ret = rdma_connect(cma_id, &conn_param);
    if (ret) {
        iser_err("failure connecting: %d\n", ret);
        goto failure;
    }

    return;
failure:
    iser_connect_error(cma_id);
}
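/*
 * The private_data carried in the connect request above is the iSER CM
 * header; its flags tell the target that this initiator supports neither
 * zero-based virtual addressing (ZBVA) nor Send-with-Invalidate.
 */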
static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
    struct iser_conn *iser_conn;
    struct ib_qp_attr attr;
    struct ib_qp_init_attr init_attr;

    iser_conn = (struct iser_conn *)cma_id->context;
    if (iser_conn->state != ISER_CONN_PENDING)
        /* bailout */
        return;

    (void)ib_query_qp(cma_id->qp, &attr, ~0, &init_attr);
    iser_info("remote qpn:%x my qpn:%x\n", attr.dest_qp_num, cma_id->qp->qp_num);

    iser_conn->state = ISER_CONN_UP;
    complete(&iser_conn->up_completion);
}
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
    struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

    if (iser_conn_terminate(iser_conn)) {
        if (iser_conn->iscsi_conn)
            iscsi_conn_failure(iser_conn->iscsi_conn,
                               ISCSI_ERR_CONN_FAILED);
        else
            iser_err("iscsi_iser connection isn't bound\n");
    }
}
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
                                 bool destroy)
{
    struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;

    /*
     * We are not guaranteed that we visited disconnected_handler
     * by now, call it here to be safe that we handle CM drep
     * as well as drop events.
     */
    iser_disconnected_handler(cma_id);
    iser_free_ib_conn_res(iser_conn, destroy);
    complete(&iser_conn->ib_completion);
}
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
    struct iser_conn *iser_conn;
    int ret = 0;

    iser_conn = (struct iser_conn *)cma_id->context;
    iser_info("%s (%d): status %d conn %p id %p\n",
              rdma_event_msg(event->event), event->event,
              event->status, cma_id->context, cma_id);

    mutex_lock(&iser_conn->state_mutex);
    switch (event->event) {
    case RDMA_CM_EVENT_ADDR_RESOLVED:
        iser_addr_handler(cma_id);
        break;
    case RDMA_CM_EVENT_ROUTE_RESOLVED:
        iser_route_handler(cma_id);
        break;
    case RDMA_CM_EVENT_ESTABLISHED:
        iser_connected_handler(cma_id);
        break;
    case RDMA_CM_EVENT_ADDR_ERROR:
    case RDMA_CM_EVENT_ROUTE_ERROR:
    case RDMA_CM_EVENT_CONNECT_ERROR:
    case RDMA_CM_EVENT_UNREACHABLE:
    case RDMA_CM_EVENT_REJECTED:
        iser_connect_error(cma_id);
        break;
    case RDMA_CM_EVENT_DISCONNECTED:
    case RDMA_CM_EVENT_ADDR_CHANGE:
    case RDMA_CM_EVENT_TIMEWAIT_EXIT:
        iser_cleanup_handler(cma_id, false);
        break;
    case RDMA_CM_EVENT_DEVICE_REMOVAL:
        /*
         * we *must* destroy the device as we cannot rely
         * on iscsid to be around to initiate error handling.
         * also if we are not in state DOWN implicitly destroy
         * the cma_id.
         */
        iser_cleanup_handler(cma_id, true);
        if (iser_conn->state != ISER_CONN_DOWN) {
            iser_conn->ib_conn.cma_id = NULL;
            ret = 1;
        }
        break;
    default:
        iser_err("Unexpected RDMA CM event: %s (%d)\n",
                 rdma_event_msg(event->event), event->event);
        break;
    }
    mutex_unlock(&iser_conn->state_mutex);

    return ret;
}
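/*
 * Returning a non-zero value from an rdma_cm event handler instructs the
 * RDMA CM to destroy the cm_id itself, which is why the DEVICE_REMOVAL
 * case above clears ib_conn->cma_id before setting ret = 1.
 */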
void iser_conn_init(struct iser_conn *iser_conn)
{
    iser_conn->state = ISER_CONN_INIT;
    iser_conn->ib_conn.post_recv_buf_count = 0;
    init_completion(&iser_conn->ib_conn.flush_comp);
    init_completion(&iser_conn->stop_completion);
    init_completion(&iser_conn->ib_completion);
    init_completion(&iser_conn->up_completion);
    INIT_LIST_HEAD(&iser_conn->conn_list);
    mutex_init(&iser_conn->state_mutex);
}
/**
 * Starts the process of connecting to the target; unless non_blocking is
 * set, sleeps until the connection is established or rejected.
 */
int iser_connect(struct iser_conn *iser_conn,
                 struct sockaddr *src_addr,
                 struct sockaddr *dst_addr,
                 int non_blocking)
{
    struct ib_conn *ib_conn = &iser_conn->ib_conn;
    int err = 0;

    mutex_lock(&iser_conn->state_mutex);

    sprintf(iser_conn->name, "%pISp", dst_addr);

    iser_info("connecting to: %s\n", iser_conn->name);

    /* the device is known only --after-- address resolution */
    ib_conn->device = NULL;

    iser_conn->state = ISER_CONN_PENDING;

    ib_conn->beacon.wr_id = ISER_BEACON_WRID;
    ib_conn->beacon.opcode = IB_WR_SEND;

    ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
                                     (void *)iser_conn,
                                     RDMA_PS_TCP, IB_QPT_RC);
    if (IS_ERR(ib_conn->cma_id)) {
        err = PTR_ERR(ib_conn->cma_id);
        iser_err("rdma_create_id failed: %d\n", err);
        goto id_failure;
    }

    err = rdma_resolve_addr(ib_conn->cma_id, src_addr, dst_addr, 1000);
    if (err) {
        iser_err("rdma_resolve_addr failed: %d\n", err);
        goto addr_failure;
    }

    if (!non_blocking) {
        wait_for_completion_interruptible(&iser_conn->up_completion);

        if (iser_conn->state != ISER_CONN_UP) {
            err = -EIO;
            goto connect_failure;
        }
    }
    mutex_unlock(&iser_conn->state_mutex);

    mutex_lock(&ig.connlist_mutex);
    list_add(&iser_conn->conn_list, &ig.connlist);
    mutex_unlock(&ig.connlist_mutex);
    return 0;

id_failure:
    ib_conn->cma_id = NULL;
addr_failure:
    iser_conn->state = ISER_CONN_DOWN;
connect_failure:
    mutex_unlock(&iser_conn->state_mutex);
    iser_conn_release(iser_conn);
    return err;
}
int iser_post_recvl(struct iser_conn *iser_conn)
{
    struct ib_recv_wr rx_wr, *rx_wr_failed;
    struct ib_conn *ib_conn = &iser_conn->ib_conn;
    struct ib_sge sge;
    int ib_ret;

    sge.addr = iser_conn->login_resp_dma;
    sge.length = ISER_RX_LOGIN_SIZE;
    sge.lkey = ib_conn->device->pd->local_dma_lkey;

    rx_wr.wr_id = (uintptr_t)iser_conn->login_resp_buf;
    rx_wr.sg_list = &sge;
    rx_wr.num_sge = 1;
    rx_wr.next = NULL;

    ib_conn->post_recv_buf_count++;
    ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
    if (ib_ret) {
        iser_err("ib_post_recv failed ret=%d\n", ib_ret);
        ib_conn->post_recv_buf_count--;
    }
    return ib_ret;
}
int iser_post_recvm(struct iser_conn *iser_conn, int count)
{
    struct ib_recv_wr *rx_wr, *rx_wr_failed;
    int i, ib_ret;
    struct ib_conn *ib_conn = &iser_conn->ib_conn;
    unsigned int my_rx_head = iser_conn->rx_desc_head;
    struct iser_rx_desc *rx_desc;

    for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
        rx_desc = &iser_conn->rx_descs[my_rx_head];
        rx_wr->wr_id = (uintptr_t)rx_desc;
        rx_wr->sg_list = &rx_desc->rx_sg;
        rx_wr->num_sge = 1;
        rx_wr->next = rx_wr + 1;
        my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
    }

    rx_wr--;
    rx_wr->next = NULL; /* mark end of work requests list */

    ib_conn->post_recv_buf_count += count;
    ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
    if (ib_ret) {
        iser_err("ib_post_recv failed ret=%d\n", ib_ret);
        ib_conn->post_recv_buf_count -= count;
    } else {
        iser_conn->rx_desc_head = my_rx_head;
    }
    return ib_ret;
}
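/*
 * The receive ring above is a power-of-two array, so wrapping the head is
 * a single AND with qp_max_recv_dtos_mask (assumed to be the descriptor
 * count minus one): e.g. with 512 descriptors the mask is 511, and a head
 * of 511 + 1 wraps back to 0.
 */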
/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
                   bool signal)
{
    struct ib_send_wr *bad_wr, *wr = iser_tx_next_wr(tx_desc);
    int ib_ret;

    ib_dma_sync_single_for_device(ib_conn->device->ib_device,
                                  tx_desc->dma_addr, ISER_HEADERS_LEN,
                                  DMA_TO_DEVICE);

    wr->next = NULL;
    wr->wr_id = (uintptr_t)tx_desc;
    wr->sg_list = tx_desc->tx_sg;
    wr->num_sge = tx_desc->num_sge;
    wr->opcode = IB_WR_SEND;
    wr->send_flags = signal ? IB_SEND_SIGNALED : 0;

    ib_ret = ib_post_send(ib_conn->qp, &tx_desc->wrs[0].send, &bad_wr);
    if (ib_ret)
        iser_err("ib_post_send failed, ret:%d opcode:%d\n",
                 ib_ret, bad_wr->opcode);

    return ib_ret;
}
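/*
 * The QP was created with sq_sig_type = IB_SIGNAL_REQ_WR (see
 * iser_create_ib_conn_res()), so only sends posted with IB_SEND_SIGNALED
 * generate a completion; unsignaled sends are reaped implicitly once a
 * later signaled WR on the same send queue completes.
 */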
/**
 * is_iser_tx_desc - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @iser_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_iser_tx_desc(struct iser_conn *iser_conn, void *wr_id)
{
    void *start = iser_conn->rx_descs;
    int len = iser_conn->num_rx_descs * sizeof(*iser_conn->rx_descs);

    if (wr_id >= start && wr_id < start + len)
        return false;

    return true;
}
/**
 * iser_handle_comp_error() - Handle error completion
 * @ib_conn:   connection RDMA resources
 * @wc:        work completion
 *
 * Notes: We may handle a FLUSH error completion and in this case
 *        we only cleanup in case TX type was DATAOUT. For non-FLUSH
 *        error completion we should also notify iscsi layer that
 *        connection is failed (in case we passed bind stage).
 */
static void
iser_handle_comp_error(struct ib_conn *ib_conn,
                       struct ib_wc *wc)
{
    void *wr_id = (void *)(uintptr_t)wc->wr_id;
    struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
                                               ib_conn);

    if (wc->status != IB_WC_WR_FLUSH_ERR)
        if (iser_conn->iscsi_conn)
            iscsi_conn_failure(iser_conn->iscsi_conn,
                               ISCSI_ERR_CONN_FAILED);

    if (wc->wr_id == ISER_FASTREG_LI_WRID)
        return;

    if (is_iser_tx_desc(iser_conn, wr_id)) {
        struct iser_tx_desc *desc = wr_id;

        if (desc->type == ISCSI_TX_DATAOUT)
            kmem_cache_free(ig.desc_cache, desc);
    } else {
        ib_conn->post_recv_buf_count--;
    }
}
/**
 * iser_handle_wc - handle a single work completion
 * @wc: work completion
 *
 * Soft-IRQ context, work completion can be either
 * SEND or RECV, and can turn out successful or
 * with error (or flush error).
 */
static void iser_handle_wc(struct ib_wc *wc)
{
    struct ib_conn *ib_conn;
    struct iser_tx_desc *tx_desc;
    struct iser_rx_desc *rx_desc;

    ib_conn = wc->qp->qp_context;
    if (likely(wc->status == IB_WC_SUCCESS)) {
        if (wc->opcode == IB_WC_RECV) {
            rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
            iser_rcv_completion(rx_desc, wc->byte_len,
                                ib_conn);
        } else if (wc->opcode == IB_WC_SEND) {
            tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
            iser_snd_completion(tx_desc, ib_conn);
        } else {
            iser_err("Unknown wc opcode %d\n", wc->opcode);
        }
    } else {
        if (wc->status != IB_WC_WR_FLUSH_ERR)
            iser_err("%s (%d): wr id %llx vend_err %x\n",
                     ib_wc_status_msg(wc->status), wc->status,
                     wc->wr_id, wc->vendor_err);
        else
            iser_dbg("%s (%d): wr id %llx\n",
                     ib_wc_status_msg(wc->status), wc->status,
                     wc->wr_id);

        if (wc->wr_id == ISER_BEACON_WRID)
            /* all flush errors were consumed */
            complete(&ib_conn->flush_comp);
        else
            iser_handle_comp_error(ib_conn, wc);
    }
}
/**
 * iser_cq_tasklet_fn - iSER completion polling loop
 * @data: iSER completion context
 *
 * Soft-IRQ context, polling connection CQ until
 * either CQ was empty or we exhausted polling budget
 */
static void iser_cq_tasklet_fn(unsigned long data)
{
    struct iser_comp *comp = (struct iser_comp *)data;
    struct ib_cq *cq = comp->cq;
    struct ib_wc *const wcs = comp->wcs;
    int i, n, completed = 0;

    while ((n = ib_poll_cq(cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
        for (i = 0; i < n; i++)
            iser_handle_wc(&wcs[i]);

        completed += n;
        if (completed >= iser_cq_poll_limit)
            break;
    }

    /*
     * It is assumed here that arming CQ only once its empty
     * would not cause interrupts to be missed.
     */
    ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

    iser_dbg("got %d completions\n", completed);
}

static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
    struct iser_comp *comp = cq_context;

    tasklet_schedule(&comp->tasklet);
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
                             enum iser_data_dir cmd_dir, sector_t *sector)
{
    struct iser_mem_reg *reg = &iser_task->rdma_reg[cmd_dir];
    struct iser_fr_desc *desc = reg->mem_h;
    unsigned long sector_size = iser_task->sc->device->sector_size;
    struct ib_mr_status mr_status;
    int ret;

    if (desc && desc->pi_ctx->sig_protected) {
        desc->pi_ctx->sig_protected = 0;
        ret = ib_check_mr_status(desc->pi_ctx->sig_mr,
                                 IB_MR_CHECK_SIG_STATUS, &mr_status);
        if (ret) {
            pr_err("ib_check_mr_status failed, ret %d\n", ret);
            /* Not a lot we can do, return ambiguous guard error */
            return 0x1;
        }

        if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
            sector_t sector_off = mr_status.sig_err.sig_err_offset;

            sector_div(sector_off, sector_size + 8);
            *sector = scsi_get_lba(iser_task->sc) + sector_off;
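            /*
             * With T10 DIF, each logical sector on the wire is the
             * data block followed by an 8-byte protection tuple,
             * hence the sector_size + 8 divisor when converting the
             * signature error offset back into a sector number.
             */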
            pr_err("PI error found type %d at sector %llx "
                   "expected %x vs actual %x\n",
                   mr_status.sig_err.err_type,
                   (unsigned long long)*sector,
                   mr_status.sig_err.expected,
                   mr_status.sig_err.actual);

            switch (mr_status.sig_err.err_type) {
            case IB_SIG_BAD_GUARD:
                return 0x1;
            case IB_SIG_BAD_REFTAG:
                return 0x3;
            case IB_SIG_BAD_APPTAG: