/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/klnds/o2iblnd/o2iblnd.h
 *
 * Author: Eric Barton <eric@bartonsoftware.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

#include <linux/file.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
#include <linux/pci.h>

#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_fmr_pool.h>

#define DEBUG_SUBSYSTEM S_LND

#include <linux/libcfs/libcfs.h>
#include <linux/lnet/lib-lnet.h>
#define IBLND_PEER_HASH_SIZE	101	/* # peer lists */
/* # scheduler loops before reschedule */
#define IBLND_RESCHED		100

#define IBLND_N_SCHED		2
#define IBLND_N_SCHED_HIGH	4
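/* module tunables; each field points at the storage behind the
 * corresponding module parameter
 */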
struct kib_tunables {
	int *kib_dev_failover;           /* HCA failover */
	unsigned int *kib_service;       /* IB service number */
	int *kib_min_reconnect_interval; /* first failed connection retry... */
	int *kib_max_reconnect_interval; /* ...exponentially increasing to this */
	int *kib_cksum;                  /* checksum struct kib_msg? */
	int *kib_timeout;                /* comms timeout (seconds) */
	int *kib_keepalive;              /* keepalive timeout (seconds) */
	int *kib_ntx;                    /* # tx descs */
	char **kib_default_ipif;         /* default IPoIB interface */
	int *kib_rnr_retry_count;
	int *kib_ib_mtu;                 /* IB MTU */
	int *kib_require_priv_port;      /* accept only privileged ports */
	int *kib_use_priv_port;          /* use privileged port for active connect */
	int *kib_nscheds;                /* # threads on each CPT */
};

extern struct kib_tunables kiblnd_tunables;
#define IBLND_MSG_QUEUE_SIZE_V1   8 /* V1 only : # messages/RDMAs in-flight */
#define IBLND_CREDIT_HIGHWATER_V1 7 /* V1 only : when eagerly to return credits */

#define IBLND_CREDITS_DEFAULT	8 /* default # of peer credits */
#define IBLND_CREDITS_MAX	((typeof(((struct kib_msg *)0)->ibm_credits)) - 1) /* Max # of peer credits */

/* when eagerly to return credits */
#define IBLND_CREDITS_HIGHWATER(t, v)	((v) == IBLND_MSG_VERSION_1 ? \
					 IBLND_CREDIT_HIGHWATER_V1 : \
					 t->lnd_peercredits_hiw)
#define kiblnd_rdma_create_id(cb, dev, ps, qpt) \
	rdma_create_id(current->nsproxy->net_ns, cb, dev, ps, qpt)
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v)	((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v)	(IBLND_OOB_CAPABLE(v) ? 2 : 0)

#define IBLND_FRAG_SHIFT	(PAGE_SHIFT - 12)	/* frag size on wire is in 4K units */
#define IBLND_MSG_SIZE		(4 << 10)		/* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS	(LNET_MAX_PAYLOAD >> 12)/* max # of fragments supported, in 4K units */
/************************/
/* derived constants... */

/* Pools (shared by connections on each CPT) */
/* These pools can grow at runtime, so they don't need a very large value */
#define IBLND_TX_POOL		256
#define IBLND_FMR_POOL		256
#define IBLND_FMR_POOL_FLUSH	192

#define IBLND_RX_MSGS(c)	\
	((c->ibc_queue_depth) * 2 + IBLND_OOB_MSGS(c->ibc_version))
#define IBLND_RX_MSG_BYTES(c)	(IBLND_RX_MSGS(c) * IBLND_MSG_SIZE)
#define IBLND_RX_MSG_PAGES(c)	\
	((IBLND_RX_MSG_BYTES(c) + PAGE_SIZE - 1) / PAGE_SIZE)

/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c)	IBLND_RX_MSGS(c)
#define IBLND_SEND_WRS(c)	\
	(((c->ibc_max_frags + 1) << IBLND_FRAG_SHIFT) * \
	 kiblnd_concurrent_sends(c->ibc_version, c->ibc_peer->ibp_ni))
#define IBLND_CQ_ENTRIES(c)	(IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
/* o2iblnd can run over aliased interface */
#ifdef IFALIASZ
#define KIB_IFNAME_SIZE	IFALIASZ
#else
#define KIB_IFNAME_SIZE	256
#endif
struct kib_dev {
	struct list_head ibd_list;      /* chain on kib_devs */
	struct list_head ibd_fail_list; /* chain on kib_failed_devs */
	__u32 ibd_ifip;                 /* IPoIB interface IP */

	/* IPoIB interface name */
	char ibd_ifname[KIB_IFNAME_SIZE];
	int ibd_nnets;                  /* # nets extant */

	unsigned long ibd_next_failover;
	int ibd_failed_failover;        /* # failover failures */
	unsigned int ibd_failover;      /* failover in progress */
	unsigned int ibd_can_failover;  /* IPoIB interface is a bonding master */
	struct list_head ibd_nets;
	struct kib_hca_dev *ibd_hdev;
};

struct kib_hca_dev {
	struct rdma_cm_id *ibh_cmid;    /* listener cmid */
	struct ib_device *ibh_ibdev;    /* IB device */
	int ibh_page_shift;             /* page shift of current HCA */
	int ibh_page_size;              /* page size of current HCA */
	__u64 ibh_page_mask;            /* page mask of current HCA */
	int ibh_mr_shift;               /* bits shift of max MR size */
	__u64 ibh_mr_size;              /* size of MR */
	struct ib_pd *ibh_pd;           /* PD */
	struct kib_dev *ibh_dev;        /* owner */
	atomic_t ibh_ref;               /* refcount */
};
/** # of seconds to keep pool alive */
#define IBLND_POOL_DEADLINE	300
/** # of seconds to retry if allocation failed */
#define IBLND_POOL_RETRY	1
struct kib_pages {
	int ibp_npages;            /* # pages */
	struct page *ibp_pages[0]; /* page array */
};
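/* a pool-set supplies these callbacks: create or destroy a whole pool,
 * and initialise or finalise an individual node within one
 */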
struct kib_pool;
struct kib_poolset;

typedef int  (*kib_ps_pool_create_t)(struct kib_poolset *ps,
				     int inc, struct kib_pool **pp_po);
typedef void (*kib_ps_pool_destroy_t)(struct kib_pool *po);
typedef void (*kib_ps_node_init_t)(struct kib_pool *po, struct list_head *node);
typedef void (*kib_ps_node_fini_t)(struct kib_pool *po, struct list_head *node);

#define IBLND_POOL_NAME_LEN 32
struct kib_poolset {
	spinlock_t ps_lock;                    /* serialize */
	struct kib_net *ps_net;                /* network it belongs to */
	char ps_name[IBLND_POOL_NAME_LEN];     /* pool set name */
	struct list_head ps_pool_list;         /* list of pools */
	struct list_head ps_failed_pool_list;  /* failed pool list */
	unsigned long ps_next_retry;           /* time stamp for retry if failed to allocate */
	int ps_increasing;                     /* is allocating new pool */
	int ps_pool_size;                      /* new pool size */
	int ps_cpt;                            /* CPT id */

	kib_ps_pool_create_t ps_pool_create;   /* create a new pool */
	kib_ps_pool_destroy_t ps_pool_destroy; /* destroy a pool */
	kib_ps_node_init_t ps_node_init;       /* initialize new allocated node */
	kib_ps_node_fini_t ps_node_fini;       /* finalize node */
};

struct kib_pool {
	struct list_head po_list;      /* chain on pool list */
	struct list_head po_free_list; /* pre-allocated nodes */
	struct kib_poolset *po_owner;  /* pool_set of this pool */
	unsigned long po_deadline;     /* deadline of this pool */
	int po_allocated;              /* # of elements in use */
	int po_failed;                 /* pool is created on failed HCA */
	int po_size;                   /* # of pre-allocated elements */
};

struct kib_tx_poolset {
	struct kib_poolset tps_poolset; /* pool-set */
	__u64 tps_next_tx_cookie;       /* cookie of TX */
};

struct kib_tx_pool {
	struct kib_pool tpo_pool;       /* pool */
	struct kib_hca_dev *tpo_hdev;   /* device for this pool */
	struct kib_tx *tpo_tx_descs;    /* all the tx descriptors */
	struct kib_pages *tpo_tx_pages; /* premapped tx msg pages */
};
struct kib_fmr_poolset {
	spinlock_t fps_lock;                   /* serialize */
	struct kib_net *fps_net;               /* IB network */
	struct list_head fps_pool_list;        /* FMR pool list */
	struct list_head fps_failed_pool_list; /* failed FMR pool list */
	__u64 fps_version;                     /* validity stamp */
	int fps_cpt;                           /* CPT id */
	int fps_flush_trigger;
	int fps_increasing;                    /* is allocating new pool */
	unsigned long fps_next_retry;          /* time stamp for retry if failed to allocate */
};

struct kib_fast_reg_descriptor { /* For fast registration */
	struct list_head frd_list;
	struct ib_send_wr frd_inv_wr;
	struct ib_reg_wr frd_fastreg_wr;
	struct ib_mr *frd_mr;
};

struct kib_fmr_pool {
	struct list_head fpo_list;         /* chain on pool list */
	struct kib_hca_dev *fpo_hdev;      /* device for this pool */
	struct kib_fmr_poolset *fpo_owner; /* owner of this pool */
	union {
		struct {
			struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
		} fmr;
		struct { /* For fast registration */
			struct list_head fpo_pool_list;
			int fpo_pool_size;
		} fast_reg;
	};
	unsigned long fpo_deadline;        /* deadline of this pool */
	int fpo_failed;                    /* fmr pool is failed */
	int fpo_map_count;                 /* # of mapped FMR */
};

struct kib_fmr {
	struct kib_fmr_pool *fmr_pool;           /* pool of FMR */
	struct ib_pool_fmr *fmr_pfmr;            /* IB pool fmr */
	struct kib_fast_reg_descriptor *fmr_frd; /* fast reg descriptor in use */
};

struct kib_net {
	struct list_head ibn_list; /* chain on struct kib_dev::ibd_nets */
	__u64 ibn_incarnation;     /* my epoch */
	int ibn_init;              /* initialisation state */
	int ibn_shutdown;          /* shutting down? */

	atomic_t ibn_npeers;       /* # peers extant */
	atomic_t ibn_nconns;       /* # connections extant */

	struct kib_tx_poolset **ibn_tx_ps;   /* tx pool-set */
	struct kib_fmr_poolset **ibn_fmr_ps; /* fmr pool-set */

	struct kib_dev *ibn_dev;   /* underlying IB device */
};
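/* a scheduler thread id encodes the CPT in its high bits and the thread's
 * index within that CPT in the low KIB_THREAD_SHIFT bits
 */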
#define KIB_THREAD_SHIFT	16
#define KIB_THREAD_ID(cpt, tid)	((cpt) << KIB_THREAD_SHIFT | (tid))
#define KIB_THREAD_CPT(id)	((id) >> KIB_THREAD_SHIFT)
#define KIB_THREAD_TID(id)	((id) & ((1UL << KIB_THREAD_SHIFT) - 1))
struct kib_sched_info {
	spinlock_t ibs_lock;         /* serialise */
	wait_queue_head_t ibs_waitq; /* schedulers sleep here */
	struct list_head ibs_conns;  /* conns to check for rx completions */
	int ibs_nthreads;            /* number of scheduler threads */
	int ibs_nthreads_max;        /* max allowed scheduler threads */
	int ibs_cpt;                 /* CPT id */
};
struct kib_data {
	int kib_init;                         /* initialisation state */
	int kib_shutdown;                     /* shut down? */
	struct list_head kib_devs;            /* IB devices extant */
	struct list_head kib_failed_devs;     /* list head of failed devices */
	wait_queue_head_t kib_failover_waitq; /* schedulers sleep here */
	atomic_t kib_nthreads;                /* # live threads */
	rwlock_t kib_global_lock;             /* stabilize net/dev/peer/conn ops */
	struct list_head *kib_peers;          /* hash table of all my known peers */
	int kib_peer_hash_size;               /* size of kib_peers */
	void *kib_connd;                      /* the connd task (serialisation assertions) */
	struct list_head kib_connd_conns;     /* connections to setup/teardown */
	struct list_head kib_connd_zombies;   /* connections with zero refcount */
	/* connections to reconnect */
	struct list_head kib_reconn_list;
	/* peers wait for reconnection */
	struct list_head kib_reconn_wait;
	/**
	 * The second that peers are pulled out from \a kib_reconn_wait
	 * for reconnection.
	 */
	time64_t kib_reconn_sec;
	wait_queue_head_t kib_connd_waitq;    /* connection daemon sleeps here */
	spinlock_t kib_connd_lock;            /* serialise */
	struct ib_qp_attr kib_error_qpa;      /* QP->ERROR */
	struct kib_sched_info **kib_scheds;   /* percpt data for schedulers */
};
#define IBLND_INIT_NOTHING	0
#define IBLND_INIT_DATA		1
#define IBLND_INIT_ALL		2
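/* values of kib_data::kib_init, in increasing order of startup progress */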
/************************************************************************
 * IB Wire message format.
 * These are sent in sender's byte order (i.e. receiver flips).
 */
struct kib_connparams {
	__u16 ibcp_queue_depth;
	__u16 ibcp_max_frags;
	__u32 ibcp_max_msg_size;
} WIRE_ATTR;

struct kib_immediate_msg {
	struct lnet_hdr ibim_hdr; /* portals header */
	char ibim_payload[0];     /* piggy-backed payload */
} WIRE_ATTR;

struct kib_rdma_frag {
	__u32 rf_nob;  /* # bytes this frag */
	__u64 rf_addr; /* CAVEAT EMPTOR: misaligned!! */
} WIRE_ATTR;

struct kib_rdma_desc {
	__u32 rd_key;                     /* local/remote key */
	__u32 rd_nfrags;                  /* # fragments */
	struct kib_rdma_frag rd_frags[0]; /* buffer frags */
} WIRE_ATTR;

struct kib_putreq_msg {
	struct lnet_hdr ibprm_hdr; /* portals header */
	__u64 ibprm_cookie;        /* opaque completion cookie */
} WIRE_ATTR;

struct kib_putack_msg {
	__u64 ibpam_src_cookie;        /* reflected completion cookie */
	__u64 ibpam_dst_cookie;        /* opaque completion cookie */
	struct kib_rdma_desc ibpam_rd; /* sender's sink buffer */
} WIRE_ATTR;

struct kib_get_msg {
	struct lnet_hdr ibgm_hdr;     /* portals header */
	__u64 ibgm_cookie;            /* opaque completion cookie */
	struct kib_rdma_desc ibgm_rd; /* rdma descriptor */
} WIRE_ATTR;

struct kib_completion_msg {
	__u64 ibcm_cookie; /* opaque completion cookie */
	__s32 ibcm_status; /* < 0 failure: >= 0 length */
} WIRE_ATTR;
struct kib_msg {
	/* First 2 fields fixed FOR ALL TIME */
	__u32 ibm_magic;    /* I'm an ibnal message */
	__u16 ibm_version;  /* this is my version number */

	__u8 ibm_type;      /* msg type */
	__u8 ibm_credits;   /* returned credits */
	__u32 ibm_nob;      /* # bytes in whole message */
	__u32 ibm_cksum;    /* checksum (0 == no checksum) */
	__u64 ibm_srcnid;   /* sender's NID */
	__u64 ibm_srcstamp; /* sender's incarnation */
	__u64 ibm_dstnid;   /* destination's NID */
	__u64 ibm_dststamp; /* destination's incarnation */

	union {
		struct kib_connparams connparams;
		struct kib_immediate_msg immediate;
		struct kib_putreq_msg putreq;
		struct kib_putack_msg putack;
		struct kib_get_msg get;
		struct kib_completion_msg completion;
	} WIRE_ATTR ibm_u;
} WIRE_ATTR;
#define IBLND_MSG_MAGIC LNET_PROTO_IB_MAGIC /* unique magic */

#define IBLND_MSG_VERSION_1 0x11
#define IBLND_MSG_VERSION_2 0x12
#define IBLND_MSG_VERSION   IBLND_MSG_VERSION_2

#define IBLND_MSG_CONNREQ   0xc0 /* connection request */
#define IBLND_MSG_CONNACK   0xc1 /* connection acknowledge */
#define IBLND_MSG_NOOP      0xd0 /* nothing (just credits) */
#define IBLND_MSG_IMMEDIATE 0xd1 /* immediate */
#define IBLND_MSG_PUT_REQ   0xd2 /* putreq (src->sink) */
#define IBLND_MSG_PUT_NAK   0xd3 /* completion (sink->src) */
#define IBLND_MSG_PUT_ACK   0xd4 /* putack (sink->src) */
#define IBLND_MSG_PUT_DONE  0xd5 /* completion (src->sink) */
#define IBLND_MSG_GET_REQ   0xd6 /* getreq (sink->src) */
#define IBLND_MSG_GET_DONE  0xd7 /* completion (src->sink: all OK) */
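/* a PUT runs PUT_REQ -> PUT_ACK (sink exposes its buffer) -> RDMA write ->
 * PUT_DONE, with PUT_NAK as the failure reply; a GET runs GET_REQ (sink
 * exposes its buffer) -> RDMA write -> GET_DONE
 */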
struct kib_rej {
	__u32 ibr_magic;              /* sender's magic */
	__u16 ibr_version;            /* sender's version */
	__u8 ibr_why;                 /* reject reason */
	__u8 ibr_padding;             /* padding */
	__u64 ibr_incarnation;        /* incarnation of peer */
	struct kib_connparams ibr_cp; /* connection parameters */
} WIRE_ATTR;
/* connection rejection reasons */
#define IBLND_REJECT_CONN_RACE      1 /* you lost connection race */
#define IBLND_REJECT_NO_RESOURCES   2 /* out of memory/conns etc */
#define IBLND_REJECT_FATAL          3 /* anything else */
#define IBLND_REJECT_CONN_UNCOMPAT  4 /* incompatible version peer */
#define IBLND_REJECT_CONN_STALE     5 /* stale peer */
/* peer's rdma frags don't match mine */
#define IBLND_REJECT_RDMA_FRAGS     6
/* peer's msg queue size doesn't match mine */
#define IBLND_REJECT_MSG_QUEUE_SIZE 7
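/* for the two mismatch rejections the rejector returns its own values in
 * struct kib_rej::ibr_cp, so the peer can reconnect with matching parameters
 */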
/***********************************************************************/
struct kib_rx {                              /* receive message */
	struct list_head rx_list;            /* queue for attention */
	struct kib_conn *rx_conn;            /* owning conn */
	int rx_nob;                          /* # bytes received (-1 while posted) */
	enum ib_wc_status rx_status;         /* completion status */
	struct kib_msg *rx_msg;              /* message buffer (host vaddr) */
	__u64 rx_msgaddr;                    /* message buffer (I/O addr) */
	DECLARE_PCI_UNMAP_ADDR(rx_msgunmap); /* for dma_unmap_single() */
	struct ib_recv_wr rx_wrq;            /* receive work item... */
	struct ib_sge rx_sge;                /* ...and its memory */
};
#define IBLND_POSTRX_DONT_POST    0 /* don't post */
#define IBLND_POSTRX_NO_CREDIT    1 /* post: no credits */
#define IBLND_POSTRX_PEER_CREDIT  2 /* post: give peer back 1 credit */
#define IBLND_POSTRX_RSRVD_CREDIT 3 /* post: give self back 1 reserved credit */
struct kib_tx {                              /* transmit message */
	struct list_head tx_list;            /* queue on idle_txs, ibc_tx_queue etc. */
	struct kib_tx_pool *tx_pool;         /* pool I'm from */
	struct kib_conn *tx_conn;            /* owning conn */
	short tx_sending;                    /* # tx callbacks outstanding */
	short tx_queued;                     /* queued for sending */
	short tx_waiting;                    /* waiting for peer */
	int tx_status;                       /* LNET completion status */
	unsigned long tx_deadline;           /* completion deadline */
	__u64 tx_cookie;                     /* completion cookie */
	struct lnet_msg *tx_lntmsg[2];       /* lnet msgs to finalize on completion */
	struct kib_msg *tx_msg;              /* message buffer (host vaddr) */
	__u64 tx_msgaddr;                    /* message buffer (I/O addr) */
	DECLARE_PCI_UNMAP_ADDR(tx_msgunmap); /* for dma_unmap_single() */
	int tx_nwrq;                         /* # send work items */
	struct ib_rdma_wr *tx_wrq;           /* send work items... */
	struct ib_sge *tx_sge;               /* ...and their memory */
	struct kib_rdma_desc *tx_rd;         /* rdma descriptor */
	int tx_nfrags;                       /* # entries in... */
	struct scatterlist *tx_frags;        /* ...dma_map_sg descriptor */
	__u64 *tx_pages;                     /* rdma phys page addrs */
	struct kib_fmr fmr;                  /* FMR */
	int tx_dmadir;                       /* dma direction */
};
struct kib_connvars {
	struct kib_msg cv_msg; /* connection-in-progress variables */
};
struct kib_conn {
	struct kib_sched_info *ibc_sched;     /* scheduler information */
	struct kib_peer *ibc_peer;            /* owning peer */
	struct kib_hca_dev *ibc_hdev;         /* HCA bound on */
	struct list_head ibc_list;            /* stash on peer's conn list */
	struct list_head ibc_sched_list;      /* schedule for attention */
	__u16 ibc_version;                    /* version of connection */
	/* reconnect later */
	__u16 ibc_reconnect:1;
	__u64 ibc_incarnation;                /* which instance of the peer */
	atomic_t ibc_refcount;                /* # users */
	int ibc_state;                        /* what's happening */
	int ibc_nsends_posted;                /* # uncompleted sends */
	int ibc_noops_posted;                 /* # uncompleted NOOPs */
	int ibc_credits;                      /* # credits I have */
	int ibc_outstanding_credits;          /* # credits to return */
	int ibc_reserved_credits;             /* # ACK/DONE msg credits */
	int ibc_comms_error;                  /* set on comms error */
	/* connection's queue depth */
	__u16 ibc_queue_depth;
	/* connection's max frags */
	__u16 ibc_max_frags;
	unsigned int ibc_nrx:16;              /* receive buffers owned */
	unsigned int ibc_scheduled:1;         /* scheduled for attention */
	unsigned int ibc_ready:1;             /* CQ callback fired */
	unsigned long ibc_last_send;          /* time of last send */
	struct list_head ibc_connd_list;      /* link chain for kiblnd_check_conns only */
	struct list_head ibc_early_rxs;       /* rxs completed before ESTABLISHED */
	struct list_head ibc_tx_noops;        /* IBLND_MSG_NOOPs for IBLND_MSG_VERSION_1 */
	struct list_head ibc_tx_queue;        /* sends that need a credit */
	struct list_head ibc_tx_queue_nocred; /* sends that don't need a credit */
	struct list_head ibc_tx_queue_rsrvd;  /* sends that need to reserve an ACK/DONE msg */
	struct list_head ibc_active_txs;      /* active tx awaiting completion */
	spinlock_t ibc_lock;                  /* serialise */
	struct kib_rx *ibc_rxs;               /* the rx descs */
	struct kib_pages *ibc_rx_pages;       /* premapped rx msg pages */

	struct rdma_cm_id *ibc_cmid;          /* CM id */
	struct ib_cq *ibc_cq;                 /* completion queue */

	struct kib_connvars *ibc_connvars;    /* in-progress connection state */
};
#define IBLND_CONN_INIT           0 /* being initialised */
#define IBLND_CONN_ACTIVE_CONNECT 1 /* active sending req */
#define IBLND_CONN_PASSIVE_WAIT   2 /* passive waiting for rtu */
#define IBLND_CONN_ESTABLISHED    3 /* connection established */
#define IBLND_CONN_CLOSING        4 /* being closed */
#define IBLND_CONN_DISCONNECTED   5 /* disconnected */
struct kib_peer {
	struct list_head ibp_list;     /* stash on global peer list */
	lnet_nid_t ibp_nid;            /* who's on the other end(s) */
	struct lnet_ni *ibp_ni;        /* LNet interface */
	struct list_head ibp_conns;    /* all active connections */
	struct list_head ibp_tx_queue; /* msgs waiting for a conn */
	__u64 ibp_incarnation;         /* incarnation of peer */
	/* when (in jiffies) I was last alive */
	unsigned long ibp_last_alive;
	/* # users */
	atomic_t ibp_refcount;
	/* version of peer */
	__u16 ibp_version;
	/* current passive connection attempts */
	unsigned short ibp_accepting;
	/* current active connection attempts */
	unsigned short ibp_connecting;
	/* reconnect this peer later */
	unsigned short ibp_reconnecting:1;
	/* counter of how many times we triggered a conn race */
	unsigned char ibp_races;
	/* # consecutive reconnection attempts to this peer */
	unsigned int ibp_reconnected;
	/* errno on closing this peer */
	int ibp_error;
	/* max map_on_demand */
	__u16 ibp_max_frags;
	/* max_peer_credits */
	__u16 ibp_queue_depth;
};
extern struct kib_data kiblnd_data;

void kiblnd_hdev_destroy(struct kib_hca_dev *hdev);

int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
/* max # of fragments configured by user */
static inline int
kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int mod;

	tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
	mod = tunables->lnd_map_on_demand;
	return mod ? mod : IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT;
}

static inline int
kiblnd_rdma_frags(int version, struct lnet_ni *ni)
{
	return version == IBLND_MSG_VERSION_1 ?
	       (IBLND_MAX_RDMA_FRAGS >> IBLND_FRAG_SHIFT) :
	       kiblnd_cfg_rdma_frags(ni);
}

static inline int
kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	int concurrent_sends;

	tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
	concurrent_sends = tunables->lnd_concurrent_sends;

	/* for V1 peers, clamp to between half and twice the fixed V1
	 * queue size
	 */
	if (version == IBLND_MSG_VERSION_1) {
		if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
			return IBLND_MSG_QUEUE_SIZE_V1 * 2;

		if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
			return IBLND_MSG_QUEUE_SIZE_V1 / 2;
	}

	return concurrent_sends;
}
static inline void
kiblnd_hdev_addref_locked(struct kib_hca_dev *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	atomic_inc(&hdev->ibh_ref);
}

static inline void
kiblnd_hdev_decref(struct kib_hca_dev *hdev)
{
	LASSERT(atomic_read(&hdev->ibh_ref) > 0);
	if (atomic_dec_and_test(&hdev->ibh_ref))
		kiblnd_hdev_destroy(hdev);
}

static inline int
kiblnd_dev_can_failover(struct kib_dev *dev)
{
	if (!list_empty(&dev->ibd_fail_list)) /* already scheduled */
		return 0;

	if (!*kiblnd_tunables.kib_dev_failover) /* disabled */
		return 0;

	if (*kiblnd_tunables.kib_dev_failover > 1) /* force failover */
		return 1;

	return dev->ibd_can_failover;
}
#define kiblnd_conn_addref(conn)				\
do {								\
	CDEBUG(D_NET, "conn[%p] (%d)++\n",			\
	       (conn), atomic_read(&(conn)->ibc_refcount));	\
	atomic_inc(&(conn)->ibc_refcount);			\
} while (0)

#define kiblnd_conn_decref(conn)					\
do {									\
	unsigned long flags;						\
									\
	CDEBUG(D_NET, "conn[%p] (%d)--\n",				\
	       (conn), atomic_read(&(conn)->ibc_refcount));		\
	LASSERT_ATOMIC_POS(&(conn)->ibc_refcount);			\
	if (atomic_dec_and_test(&(conn)->ibc_refcount)) {		\
		spin_lock_irqsave(&kiblnd_data.kib_connd_lock, flags);	\
		list_add_tail(&(conn)->ibc_list,			\
			      &kiblnd_data.kib_connd_zombies);		\
		wake_up(&kiblnd_data.kib_connd_waitq);			\
		spin_unlock_irqrestore(&kiblnd_data.kib_connd_lock, flags); \
	}								\
} while (0)

#define kiblnd_peer_addref(peer)				\
do {								\
	CDEBUG(D_NET, "peer[%p] -> %s (%d)++\n",		\
	       (peer), libcfs_nid2str((peer)->ibp_nid),		\
	       atomic_read(&(peer)->ibp_refcount));		\
	atomic_inc(&(peer)->ibp_refcount);			\
} while (0)

#define kiblnd_peer_decref(peer)				\
do {								\
	CDEBUG(D_NET, "peer[%p] -> %s (%d)--\n",		\
	       (peer), libcfs_nid2str((peer)->ibp_nid),		\
	       atomic_read(&(peer)->ibp_refcount));		\
	LASSERT_ATOMIC_POS(&(peer)->ibp_refcount);		\
	if (atomic_dec_and_test(&(peer)->ibp_refcount))		\
		kiblnd_destroy_peer(peer);			\
} while (0)
static inline bool
kiblnd_peer_connecting(struct kib_peer *peer)
{
	return peer->ibp_connecting ||
	       peer->ibp_reconnecting ||
	       peer->ibp_accepting;
}

static inline bool
kiblnd_peer_idle(struct kib_peer *peer)
{
	return !kiblnd_peer_connecting(peer) && list_empty(&peer->ibp_conns);
}

static inline struct list_head *
kiblnd_nid2peerlist(lnet_nid_t nid)
{
	unsigned int hash =
		((unsigned int)nid) % kiblnd_data.kib_peer_hash_size;

	return &kiblnd_data.kib_peers[hash];
}

static inline int
kiblnd_peer_active(struct kib_peer *peer)
{
	/* Am I in the peer hash table? */
	return !list_empty(&peer->ibp_list);
}

static inline struct kib_conn *
kiblnd_get_conn_locked(struct kib_peer *peer)
{
	LASSERT(!list_empty(&peer->ibp_conns));

	/* just return the first connection */
	return list_entry(peer->ibp_conns.next, struct kib_conn, ibc_list);
}
static inline int
kiblnd_send_keepalive(struct kib_conn *conn)
{
	return (*kiblnd_tunables.kib_keepalive > 0) &&
		cfs_time_after(jiffies, conn->ibc_last_send +
			       msecs_to_jiffies(*kiblnd_tunables.kib_keepalive *
						MSEC_PER_SEC));
}

static inline int
kiblnd_need_noop(struct kib_conn *conn)
{
	struct lnet_ioctl_config_o2iblnd_tunables *tunables;
	struct lnet_ni *ni = conn->ibc_peer->ibp_ni;

	LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
	tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;

	if (conn->ibc_outstanding_credits <
	    IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
	    !kiblnd_send_keepalive(conn))
		return 0; /* No need to send NOOP */

	if (IBLND_OOB_CAPABLE(conn->ibc_version)) {
		if (!list_empty(&conn->ibc_tx_queue_nocred))
			return 0; /* NOOP can be piggybacked */

		/* No tx to piggyback NOOP onto or no credit to send a tx */
		return (list_empty(&conn->ibc_tx_queue) ||
			!conn->ibc_credits);
	}

	if (!list_empty(&conn->ibc_tx_noops) ||        /* NOOP already queued */
	    !list_empty(&conn->ibc_tx_queue_nocred) || /* piggyback NOOP */
	    !conn->ibc_credits)                        /* no credit */
		return 0;

	if (conn->ibc_credits == 1 &&       /* last credit reserved for */
	    !conn->ibc_outstanding_credits) /* giving back credits */
		return 0;

	/* No tx to piggyback NOOP onto or no credit to send a tx */
	return (list_empty(&conn->ibc_tx_queue) || conn->ibc_credits == 1);
}

static inline void
kiblnd_abort_receives(struct kib_conn *conn)
{
	ib_modify_qp(conn->ibc_cmid->qp,
		     &kiblnd_data.kib_error_qpa, IB_QP_STATE);
}

static inline const char *
kiblnd_queue2str(struct kib_conn *conn, struct list_head *q)
{
	if (q == &conn->ibc_tx_queue)
		return "tx_queue";

	if (q == &conn->ibc_tx_queue_rsrvd)
		return "tx_queue_rsrvd";

	if (q == &conn->ibc_tx_queue_nocred)
		return "tx_queue_nocred";

	if (q == &conn->ibc_active_txs)
		return "active_txs";

	return NULL;
}
/* CAVEAT EMPTOR: We rely on descriptor alignment to allow us to use the
 * lowest bits of the work request id to stash the work item type.
 */
#define IBLND_WID_INVAL	0
#define IBLND_WID_TX	1
#define IBLND_WID_RX	2
#define IBLND_WID_RDMA	3
#define IBLND_WID_MR	4
#define IBLND_WID_MASK	7UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
{
	unsigned long lptr = (unsigned long)ptr;

	LASSERT(!(lptr & IBLND_WID_MASK));
	LASSERT(!(type & ~IBLND_WID_MASK));
	return (__u64)(lptr | type);
}

static inline void *
kiblnd_wreqid2ptr(__u64 wreqid)
{
	return (void *)(((unsigned long)wreqid) & ~IBLND_WID_MASK);
}

static inline int
kiblnd_wreqid2type(__u64 wreqid)
{
	return wreqid & IBLND_WID_MASK;
}

static inline void
kiblnd_set_conn_state(struct kib_conn *conn, int state)
{
	conn->ibc_state = state;
	mb(); /* make the new state globally visible */
}

static inline void
kiblnd_init_msg(struct kib_msg *msg, int type, int body_nob)
{
	msg->ibm_type = type;
	msg->ibm_nob = offsetof(struct kib_msg, ibm_u) + body_nob;
}
static inline int
kiblnd_rd_size(struct kib_rdma_desc *rd)
{
	int i;
	int size;

	for (i = size = 0; i < rd->rd_nfrags; i++)
		size += rd->rd_frags[i].rf_nob;

	return size;
}

static inline __u64
kiblnd_rd_frag_addr(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_frags[index].rf_addr;
}

static inline __u32
kiblnd_rd_frag_size(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_frags[index].rf_nob;
}

static inline __u32
kiblnd_rd_frag_key(struct kib_rdma_desc *rd, int index)
{
	return rd->rd_key;
}

static inline int
kiblnd_rd_consume_frag(struct kib_rdma_desc *rd, int index, __u32 nob)
{
	if (nob < rd->rd_frags[index].rf_nob) {
		rd->rd_frags[index].rf_addr += nob;
		rd->rd_frags[index].rf_nob -= nob;
	} else {
		index++;
	}

	return index;
}

static inline int
kiblnd_rd_msg_size(struct kib_rdma_desc *rd, int msgtype, int n)
{
	LASSERT(msgtype == IBLND_MSG_GET_REQ ||
		msgtype == IBLND_MSG_PUT_ACK);

	return msgtype == IBLND_MSG_GET_REQ ?
	       offsetof(struct kib_get_msg, ibgm_rd.rd_frags[n]) :
	       offsetof(struct kib_putack_msg, ibpam_rd.rd_frags[n]);
}
static inline int
kiblnd_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return ib_dma_mapping_error(dev, dma_addr);
}

static inline __u64 kiblnd_dma_map_single(struct ib_device *dev,
					  void *msg, size_t size,
					  enum dma_data_direction direction)
{
	return ib_dma_map_single(dev, msg, size, direction);
}

static inline void kiblnd_dma_unmap_single(struct ib_device *dev,
					   __u64 addr, size_t size,
					   enum dma_data_direction direction)
{
	ib_dma_unmap_single(dev, addr, size, direction);
}
#define KIBLND_UNMAP_ADDR_SET(p, m, a)	do {} while (0)
#define KIBLND_UNMAP_ADDR(p, m, a)	(a)
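/* no separate unmap bookkeeping is kept: the address handed back for
 * unmapping is simply the one stored in the descriptor
 */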
static inline int kiblnd_dma_map_sg(struct ib_device *dev,
				    struct scatterlist *sg, int nents,
				    enum dma_data_direction direction)
{
	return ib_dma_map_sg(dev, sg, nents, direction);
}

static inline void kiblnd_dma_unmap_sg(struct ib_device *dev,
				       struct scatterlist *sg, int nents,
				       enum dma_data_direction direction)
{
	ib_dma_unmap_sg(dev, sg, nents, direction);
}

static inline __u64 kiblnd_sg_dma_address(struct ib_device *dev,
					  struct scatterlist *sg)
{
	return ib_sg_dma_address(dev, sg);
}

static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
					     struct scatterlist *sg)
{
	return ib_sg_dma_len(dev, sg);
}
/* XXX We use KIBLND_CONN_PARAM(e) as a writable buffer; that's not strictly
 * right because OFED 1.2 defines it as const, so we have to add a (void *)
 * cast to overcome "const"
 */
#define KIBLND_CONN_PARAM(e)     ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
void kiblnd_map_rx_descs(struct kib_conn *conn);
void kiblnd_unmap_rx_descs(struct kib_conn *conn);
void kiblnd_pool_free_node(struct kib_pool *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(struct kib_poolset *ps);

int kiblnd_fmr_pool_map(struct kib_fmr_poolset *fps, struct kib_tx *tx,
			struct kib_rdma_desc *rd, __u32 nob, __u64 iov,
			struct kib_fmr *fmr);
void kiblnd_fmr_pool_unmap(struct kib_fmr *fmr, int status);

int kiblnd_tunables_setup(struct lnet_ni *ni);
void kiblnd_tunables_init(void);

int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
int kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name);
int kiblnd_failover_thread(void *arg);

int kiblnd_alloc_pages(struct kib_pages **pp, int cpt, int npages);

int kiblnd_cm_callback(struct rdma_cm_id *cmid,
		       struct rdma_cm_event *event);
int kiblnd_translate_mtu(int value);

int kiblnd_dev_failover(struct kib_dev *dev);
int kiblnd_create_peer(struct lnet_ni *ni, struct kib_peer **peerp,
		       lnet_nid_t nid);
void kiblnd_destroy_peer(struct kib_peer *peer);
bool kiblnd_reconnect_peer(struct kib_peer *peer);
void kiblnd_destroy_dev(struct kib_dev *dev);
void kiblnd_unlink_peer_locked(struct kib_peer *peer);
struct kib_peer *kiblnd_find_peer_locked(lnet_nid_t nid);
int kiblnd_close_stale_conns_locked(struct kib_peer *peer,
				    int version, __u64 incarnation);
int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);

struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
				    struct rdma_cm_id *cmid,
				    int state, int version);
void kiblnd_destroy_conn(struct kib_conn *conn);
void kiblnd_close_conn(struct kib_conn *conn, int error);
void kiblnd_close_conn_locked(struct kib_conn *conn, int error);

void kiblnd_launch_tx(struct lnet_ni *ni, struct kib_tx *tx, lnet_nid_t nid);
void kiblnd_txlist_done(struct lnet_ni *ni, struct list_head *txlist,
			int status);
void kiblnd_qp_event(struct ib_event *event, void *arg);
void kiblnd_cq_event(struct ib_event *event, void *arg);
void kiblnd_cq_completion(struct ib_cq *cq, void *arg);

void kiblnd_pack_msg(struct lnet_ni *ni, struct kib_msg *msg, int version,
		     int credits, lnet_nid_t dstnid, __u64 dststamp);
int kiblnd_unpack_msg(struct kib_msg *msg, int nob);
int kiblnd_post_rx(struct kib_rx *rx, int credit);

int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
		int delayed, struct iov_iter *to, unsigned int rlen);