/*
 * Copyright(c) 2015 - 2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/spinlock.h>

#include "hfi.h"
#include "mad.h"
#include "qp.h"
#include "verbs_txreq.h"
#include "trace.h"

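/*
 * Validate a received GID: the interface ID must match exactly, and the
 * subnet prefix must match either the expected prefix or the default
 * GID prefix.
 */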
static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
{
        return (gid->global.interface_id == id &&
                (gid->global.subnet_prefix == gid_prefix ||
                 gid->global.subnet_prefix == IB_DEFAULT_GID_PREFIX));
}

/*
 * This should be called with the QP r_lock held.
 *
 * The s_lock will be acquired around the hfi1_migrate_qp() call.
 *
 * Return: 1 if the header fails validation (the packet should be dropped),
 * 0 otherwise.
 */
int hfi1_ruc_check_hdr(struct hfi1_ibport *ibp, struct hfi1_packet *packet)
{
        __be64 guid;
        unsigned long flags;
        struct rvt_qp *qp = packet->qp;
        u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
        u32 dlid = packet->dlid;
        u32 slid = packet->slid;
        u32 sl = packet->sl;
        bool migrated = packet->migrated;
        u16 pkey = packet->pkey;

        if (qp->s_mig_state == IB_MIG_ARMED && migrated) {
                if (!packet->grh) {
                        if ((rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                             IB_AH_GRH) &&
                            (packet->etype != RHF_RCV_TYPE_BYPASS))
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->alt_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->alt_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(&packet->grh->sgid,
                                    grh->dgid.global.subnet_prefix,
                                    grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
                                      slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 and 17.2.8 */
                if (slid != rdma_ah_get_dlid(&qp->alt_ah_attr) ||
                    ppd_from_ibp(ibp)->port !=
                    rdma_ah_get_port_num(&qp->alt_ah_attr))
                        return 1;
                spin_lock_irqsave(&qp->s_lock, flags);
                hfi1_migrate_qp(qp);
                spin_unlock_irqrestore(&qp->s_lock, flags);
        } else {
                if (!packet->grh) {
                        if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                             IB_AH_GRH) &&
                            (packet->etype != RHF_RCV_TYPE_BYPASS))
                                return 1;
                } else {
                        const struct ib_global_route *grh;

                        if (!(rdma_ah_get_ah_flags(&qp->remote_ah_attr) &
                              IB_AH_GRH))
                                return 1;
                        grh = rdma_ah_read_grh(&qp->remote_ah_attr);
                        guid = get_sguid(ibp, grh->sgid_index);
                        if (!gid_ok(&packet->grh->dgid, ibp->rvp.gid_prefix,
                                    guid))
                                return 1;
                        if (!gid_ok(&packet->grh->sgid,
                                    grh->dgid.global.subnet_prefix,
                                    grh->dgid.global.interface_id))
                                return 1;
                }
                if (unlikely(rcv_pkey_check(ppd_from_ibp(ibp), pkey,
                                            sc5, slid))) {
                        hfi1_bad_pkey(ibp, pkey, sl, 0, qp->ibqp.qp_num,
                                      slid, dlid);
                        return 1;
                }
                /* Validate the SLID. See Ch. 9.6.1.5 */
                if ((slid != rdma_ah_get_dlid(&qp->remote_ah_attr)) ||
                    ppd_from_ibp(ibp)->port != qp->port_num)
                        return 1;
                if (qp->s_mig_state == IB_MIG_REARM && !migrated)
                        qp->s_mig_state = IB_MIG_ARMED;
        }

        return 0;
}

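/*
 * The loopback path below drives the responder side in software: receive
 * WQEs are consumed with rvt_get_rwqe(), rkeys are validated with
 * rvt_rkey_ok(), and payload is moved with hfi1_copy_sge(), so loopback
 * traffic sees the same access checks as traffic arriving from the fabric.
 */
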
/**
 * ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from hfi1_do_send() to
 * forward a WQE addressed to the same HFI.
 * Note that although we are single threaded due to the send engine, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ruc_loopback(struct rvt_qp *sqp)
{
        struct hfi1_ibport *ibp = to_iport(sqp->ibqp.device, sqp->port_num);
        struct rvt_qp *qp;
        struct rvt_swqe *wqe;
        struct rvt_sge *sge;
        unsigned long flags;
        struct ib_wc wc;
        u64 sdata;
        atomic64_t *maddr;
        enum ib_wc_status send_status;
        bool release;
        int ret;
        bool copy_last = false;
        int local_ops = 0;

        rcu_read_lock();

        /*
         * Note that we check the responder QP state after
         * checking the requester's state.
         */
        qp = rvt_lookup_qpn(ib_to_rvt(sqp->ibqp.device), &ibp->rvp,
                            sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        /* Return if we are already busy processing a work request. */
        if ((sqp->s_flags & (RVT_S_BUSY | HFI1_S_ANY_WAIT)) ||
            !(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_OR_FLUSH_SEND))
                goto unlock;

        sqp->s_flags |= RVT_S_BUSY;

again:
        if (sqp->s_last == READ_ONCE(sqp->s_head))
                goto clr_busy;
        wqe = rvt_get_swqe_ptr(sqp, sqp->s_last);

        /* Return if it is not OK to start a new work request. */
        if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
                if (!(ib_rvt_state_ops[sqp->state] & RVT_FLUSH_SEND))
                        goto clr_busy;
                /* We are in the error state, flush the work request. */
                send_status = IB_WC_WR_FLUSH_ERR;
                goto flush_send;
        }

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         * We increment s_cur to indicate s_last is in progress.
         */
        if (sqp->s_last == sqp->s_cur) {
                if (++sqp->s_cur >= sqp->s_size)
                        sqp->s_cur = 0;
        }
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        if (!qp || !(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) ||
            qp->ibqp.qp_type != sqp->ibqp.qp_type) {
                ibp->rvp.n_pkt_drops++;
                /*
                 * For RC, the requester would timeout and retry so
                 * shortcut the timeouts and just signal too many retries.
                 */
                if (sqp->ibqp.qp_type == IB_QPT_RC)
                        send_status = IB_WC_RETRY_EXC_ERR;
                else
                        send_status = IB_WC_SUCCESS;
                goto serr;
        }

        memset(&wc, 0, sizeof(wc));
        send_status = IB_WC_SUCCESS;

        release = true;
        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_REG_MR:
                goto send_comp;

        case IB_WR_LOCAL_INV:
                if (!(wqe->wr.send_flags & RVT_SEND_COMPLETION_ONLY)) {
                        if (rvt_invalidate_rkey(sqp,
                                                wqe->wr.ex.invalidate_rkey))
                                send_status = IB_WC_LOC_PROT_ERR;
                        local_ops = 1;
                }
                goto send_comp;

        case IB_WR_SEND_WITH_INV:
                if (!rvt_invalidate_rkey(qp, wqe->wr.ex.invalidate_rkey)) {
                        wc.wc_flags = IB_WC_WITH_INVALIDATE;
                        wc.ex.invalidate_rkey = wqe->wr.ex.invalidate_rkey;
                }
                goto send;

        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                /* FALLTHROUGH */
        case IB_WR_SEND:
send:
                ret = rvt_get_rwqe(qp, false);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                if (wqe->length > qp->r_len)
                        goto inv_err;
                break;

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.ex.imm_data = wqe->wr.ex.imm_data;
                ret = rvt_get_rwqe(qp, true);
                if (ret < 0)
                        goto op_err;
                if (!ret)
                        goto rnr_nak;
                /* skip copy_last set and qp_access_flags recheck */
                goto do_write;
        case IB_WR_RDMA_WRITE:
                copy_last = rvt_is_user_qp(qp);
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                        goto inv_err;
do_write:
                if (wqe->length == 0)
                        break;
                if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, wqe->length,
                                          wqe->rdma_wr.remote_addr,
                                          wqe->rdma_wr.rkey,
                                          IB_ACCESS_REMOTE_WRITE)))
                        goto acc_err;
                qp->r_sge.sg_list = NULL;
                qp->r_sge.num_sge = 1;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                        goto inv_err;
                if (unlikely(!rvt_rkey_ok(qp, &sqp->s_sge.sge, wqe->length,
                                          wqe->rdma_wr.remote_addr,
                                          wqe->rdma_wr.rkey,
                                          IB_ACCESS_REMOTE_READ)))
                        goto acc_err;
                release = false;
                sqp->s_sge.sg_list = NULL;
                sqp->s_sge.num_sge = 1;
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;
                qp->r_sge.total_len = wqe->length;
                break;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                        goto inv_err;
                if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
                                          wqe->atomic_wr.remote_addr,
                                          wqe->atomic_wr.rkey,
                                          IB_ACCESS_REMOTE_ATOMIC)))
                        goto acc_err;
                /* Perform atomic OP and save result. */
                maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
                sdata = wqe->atomic_wr.compare_add;
                *(u64 *)sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64)atomic64_add_return(sdata, maddr) - sdata :
                        (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
                                     sdata, wqe->atomic_wr.swap);
                rvt_put_mr(qp->r_sge.sge.mr);
                qp->r_sge.num_sge = 0;
                goto send_comp;

        default:
                send_status = IB_WC_LOC_QP_OP_ERR;
                goto serr;
        }

        sge = &sqp->s_sge.sge;
        while (sqp->s_len) {
                u32 len = sqp->s_len;

                if (len > sge->length)
                        len = sge->length;
                if (len > sge->sge_length)
                        len = sge->sge_length;
                WARN_ON_ONCE(len == 0);
                hfi1_copy_sge(&qp->r_sge, sge->vaddr, len, release, copy_last);
                sge->vaddr += len;
                sge->length -= len;
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (!release)
                                rvt_put_mr(sge->mr);
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr->lkey) {
                        if (++sge->n >= RVT_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                        break;
                                sge->n = 0;
                        }
                        sge->vaddr =
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                        sge->length =
                                sge->mr->map[sge->m]->segs[sge->n].length;
                }
                sqp->s_len -= len;
        }
        if (release)
                rvt_put_ss(&qp->r_sge);

        if (!test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
                goto send_comp;

        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
        else
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.qp = &qp->ibqp;
        wc.src_qp = qp->remote_qpn;
        wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
        wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
        wc.port_num = 1;
        /* Signal completion event if the solicited bit is set. */
        rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
                     wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
        spin_lock_irqsave(&sqp->s_lock, flags);
        ibp->rvp.n_loop_pkts++;
flush_send:
        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        hfi1_send_complete(sqp, wqe, send_status);
        if (local_ops) {
                atomic_dec(&sqp->local_ops_pending);
                local_ops = 0;
        }
        goto again;

rnr_nak:
        /* Handle RNR NAK */
        if (qp->ibqp.qp_type == IB_QPT_UC)
                goto send_comp;
        ibp->rvp.n_rnr_naks++;
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
                goto serr;
        }
        if (sqp->s_rnr_retry_cnt < 7)
                sqp->s_rnr_retry--;
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_rvt_state_ops[sqp->state] & RVT_PROCESS_RECV_OK))
                goto clr_busy;
        rvt_add_rnr_timer(sqp, qp->r_min_rnr_timer <<
                               IB_AETH_CREDIT_SHIFT);
        goto clr_busy;

op_err:
        send_status = IB_WC_REM_OP_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

inv_err:
        send_status =
                sqp->ibqp.qp_type == IB_QPT_RC ?
                        IB_WC_REM_INV_REQ_ERR :
                        IB_WC_SUCCESS;
        wc.status = IB_WC_LOC_QP_OP_ERR;
        goto err;

acc_err:
        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;
err:
        /* responder goes to error state */
        rvt_rc_error(qp, wc.status);

serr:
        spin_lock_irqsave(&sqp->s_lock, flags);
        hfi1_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = rvt_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~RVT_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                if (lastwqe) {
                        struct ib_event ev;

                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);
                }
                goto done;
        }
clr_busy:
        sqp->s_flags &= ~RVT_S_BUSY;
unlock:
        spin_unlock_irqrestore(&sqp->s_lock, flags);
done:
        rcu_read_unlock();
}

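/*
 * Everything below is the transmit half shared by the RC and UC engines:
 * GRH/BTH construction, adaptive header generation (AHG), the 9B/16B
 * header builders, and the common send loop in hfi1_do_send().
 */
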
/**
 * hfi1_make_grh - construct a GRH header
 * @ibp: a pointer to the IB port
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: size of header after grh being sent in dwords
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 hfi1_make_grh(struct hfi1_ibport *ibp, struct ib_grh *hdr,
                  const struct ib_global_route *grh, u32 hwords, u32 nwords)
{
        hdr->version_tclass_flow =
                cpu_to_be32((IB_GRH_VERSION << IB_GRH_VERSION_SHIFT) |
                            (grh->traffic_class << IB_GRH_TCLASS_SHIFT) |
                            (grh->flow_label << IB_GRH_FLOW_SHIFT));
        hdr->paylen = cpu_to_be16((hwords + nwords) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = IB_GRH_NEXT_HDR;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = ibp->rvp.gid_prefix;
        hdr->sgid.global.interface_id =
                grh->sgid_index < HFI1_GUIDS_PER_PORT ?
                get_sguid(ibp, grh->sgid_index) :
                get_sguid(ibp, HFI1_PORT_GUID_INDEX);
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
}

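/*
 * BTH2_OFFSET is the dword index of bth[2] (the PSN word) within the SDMA
 * header; build_ahg() below uses it when building AHG descriptors that
 * patch the PSN of subsequent middle packets.
 */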
#define BTH2_OFFSET (offsetof(struct hfi1_sdma_header, \
                              hdr.ibh.u.oth.bth[2]) / 4)

/**
 * build_ahg - create ahg in s_ahg
 * @qp: a pointer to QP
 * @npsn: the next PSN for the request/response
 *
 * This routine handles the AHG by allocating an ahg entry and causing the
 * copy of the first middle.
 *
 * Subsequent middles use the copied entry, editing the
 * PSN with 1 or 2 edits.
 */
static inline void build_ahg(struct rvt_qp *qp, u32 npsn)
{
        struct hfi1_qp_priv *priv = qp->priv;

        if (unlikely(qp->s_flags & HFI1_S_AHG_CLEAR))
                clear_ahg(qp);
        if (!(qp->s_flags & HFI1_S_AHG_VALID)) {
                /* first middle that needs copy */
                if (qp->s_ahgidx < 0)
                        qp->s_ahgidx = sdma_ahg_alloc(priv->s_sde);
                if (qp->s_ahgidx >= 0) {
                        qp->s_ahgpsn = npsn;
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_AHG_COPY;
                        /* save to protect a change in another thread */
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        qp->s_flags |= HFI1_S_AHG_VALID;
                }
        } else {
                /* subsequent middle after valid */
                if (qp->s_ahgidx >= 0) {
                        priv->s_ahg->tx_flags |= SDMA_TXREQ_F_USE_AHG;
                        priv->s_ahg->ahgidx = qp->s_ahgidx;
                        priv->s_ahg->ahgcount++;
                        priv->s_ahg->ahgdesc[0] =
                                sdma_build_ahg_descriptor(
                                        (__force u16)cpu_to_be16((u16)npsn),
                                        BTH2_OFFSET,
                                        16,
                                        16);
                        if ((npsn & 0xffff0000) !=
                            (qp->s_ahgpsn & 0xffff0000)) {
                                priv->s_ahg->ahgcount++;
                                priv->s_ahg->ahgdesc[1] =
                                        sdma_build_ahg_descriptor(
                                                (__force u16)cpu_to_be16(
                                                        (u16)(npsn >> 16)),
                                                BTH2_OFFSET,
                                                0,
                                                16);
                        }
                }
        }
}

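/*
 * Fill in the three BTH dwords common to RC/UC packets: bth0 (opcode,
 * pad and pkey/migration bits supplied by the caller), bth1 (the remote
 * QPN plus any BECN bits), and bth2 (the PSN).
 */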
static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
                                     struct ib_other_headers *ohdr,
                                     u32 bth0, u32 bth1, u32 bth2)
{
        bth1 |= qp->remote_qpn;
        ohdr->bth[0] = cpu_to_be32(bth0);
        ohdr->bth[1] = cpu_to_be32(bth1);
        ohdr->bth[2] = cpu_to_be32(bth2);
}

/**
 * hfi1_make_ruc_header_16B - build a 16B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
                                            struct ib_other_headers *ohdr,
                                            u32 bth0, u32 bth2, int middle,
                                            struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp = ps->ibp;
        struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
        u32 bth1 = 0;
        u32 slid;
        u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
        u8 l4 = OPA_16B_L4_IB_LOCAL;
        u8 extra_bytes = hfi1_get_16b_padding(
                                (ps->s_txreq->hdr_dwords << 2),
                                ps->s_txreq->s_cur_size);
        u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
                                     extra_bytes + SIZE_OF_LT) >> 2);
        bool becn = false;

        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
            hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
                struct ib_grh *grh;
                struct ib_global_route *grd =
                        rdma_ah_retrieve_grh(&qp->remote_ah_attr);
                /*
                 * Ensure OPA GIDs are transformed to IB gids
                 * before creating the GRH.
                 */
                if (grd->sgid_index == OPA_GID_INDEX)
                        grd->sgid_index = 0;
                grh = &ps->s_txreq->phdr.hdr.opah.u.l.grh;
                l4 = OPA_16B_L4_IB_GLOBAL;
                ps->s_txreq->hdr_dwords +=
                        hfi1_make_grh(ibp, grh, grd,
                                      ps->s_txreq->hdr_dwords - LRH_16B_DWORDS,
                                      nwords);
                middle = 0;
        }

        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth1 |= OPA_BTH_MIG_REQ;
        else
                middle = 0;

        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;
                /* we recently received a FECN, so return a BECN */
                becn = true;
                middle = 0;
        }
        if (middle)
                build_ahg(qp, bth2);
        else
                qp->s_flags &= ~HFI1_S_AHG_VALID;

        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);

        if (!ppd->lid)
                slid = be32_to_cpu(OPA_LID_PERMISSIVE);
        else
                slid = ppd->lid |
                        (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
                        ((1 << ppd->lmc) - 1));

        hfi1_make_16b_hdr(&ps->s_txreq->phdr.hdr.opah,
                          slid,
                          opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
                                      16B),
                          (ps->s_txreq->hdr_dwords + nwords) >> 1,
                          pkey, becn, 0, l4, priv->s_sc);
}

/**
 * hfi1_make_ruc_header_9B - build a 9B header
 * @qp: the queue pair
 * @ohdr: a pointer to the destination header memory
 * @bth0: bth0 passed in from the RC/UC builder
 * @bth2: bth2 passed in from the RC/UC builder
 * @middle: non-zero indicates ahg "could" be used
 * @ps: the current packet state
 *
 * This routine may disarm ahg under these situations:
 * - packet needs a GRH
 * - BECN needed
 * - migration state not IB_MIG_MIGRATED
 */
static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
                                           struct ib_other_headers *ohdr,
                                           u32 bth0, u32 bth2, int middle,
                                           struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;
        struct hfi1_ibport *ibp = ps->ibp;
        u32 bth1 = 0;
        u16 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
        u16 lrh0 = HFI1_LRH_BTH;
        u8 extra_bytes = -ps->s_txreq->s_cur_size & 3;
        u32 nwords = SIZE_OF_CRC + ((ps->s_txreq->s_cur_size +
                                     extra_bytes) >> 2);

        if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
                struct ib_grh *grh = &ps->s_txreq->phdr.hdr.ibh.u.l.grh;

                lrh0 = HFI1_LRH_GRH;
                ps->s_txreq->hdr_dwords +=
                        hfi1_make_grh(ibp, grh,
                                      rdma_ah_read_grh(&qp->remote_ah_attr),
                                      ps->s_txreq->hdr_dwords - LRH_9B_DWORDS,
                                      nwords);
                middle = 0;
        }
        lrh0 |= (priv->s_sc & 0xf) << 12 |
                (rdma_ah_get_sl(&qp->remote_ah_attr) & 0xf) << 4;

        if (qp->s_mig_state == IB_MIG_MIGRATED)
                bth0 |= IB_BTH_MIG_REQ;
        else
                middle = 0;

        if (qp->s_flags & RVT_S_ECN) {
                qp->s_flags &= ~RVT_S_ECN;
                /* we recently received a FECN, so return a BECN */
                bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
                middle = 0;
        }
        if (middle)
                build_ahg(qp, bth2);
        else
                qp->s_flags &= ~HFI1_S_AHG_VALID;

        bth0 |= pkey;
        bth0 |= extra_bytes << 20;
        hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
        hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
                         lrh0,
                         ps->s_txreq->hdr_dwords + nwords,
                         opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
                         ppd_from_ibp(ibp)->lid |
                         rdma_ah_get_path_bits(&qp->remote_ah_attr));
}

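/*
 * Header construction is dispatched through a small table of builders so
 * the RC/UC code paths stay format-agnostic: priv->hdr_type selects the
 * 9B (IB) or 16B (OPA) builder when hfi1_make_ruc_header() is called.
 */
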
typedef void (*hfi1_make_ruc_hdr)(struct rvt_qp *qp,
                                  struct ib_other_headers *ohdr,
                                  u32 bth0, u32 bth2, int middle,
                                  struct hfi1_pkt_state *ps);

/* We support only two types - 9B and 16B for now */
static const hfi1_make_ruc_hdr hfi1_ruc_header_tbl[2] = {
        [HFI1_PKT_TYPE_9B] = &hfi1_make_ruc_header_9B,
        [HFI1_PKT_TYPE_16B] = &hfi1_make_ruc_header_16B
};

void hfi1_make_ruc_header(struct rvt_qp *qp, struct ib_other_headers *ohdr,
                          u32 bth0, u32 bth2, int middle,
                          struct hfi1_pkt_state *ps)
{
        struct hfi1_qp_priv *priv = qp->priv;

        /*
         * reset s_ahg/AHG fields
         *
         * This ensures that the ahgentry/ahgcount
         * are at a non-AHG default to protect
         * build_verbs_tx_desc() from using
         * a stale ahgidx.
         *
         * build_ahg() will modify as appropriate
         * to use the AHG feature.
         */
        priv->s_ahg->tx_flags = 0;
        priv->s_ahg->ahgcount = 0;
        priv->s_ahg->ahgidx = 0;

        /* Make the appropriate header */
        hfi1_ruc_header_tbl[priv->hdr_type](qp, ohdr, bth0, bth2, middle, ps);
}

/* when sending, force a reschedule every one of these periods */
#define SEND_RESCHED_TIMEOUT (5 * HZ)  /* 5s in jiffies */

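/*
 * hfi1_do_send() divides the per-QP timeout interval by 8 before arming the
 * yield check below, so UC/UD QPs get roughly 625 ms slices from
 * SEND_RESCHED_TIMEOUT while RC QPs are sliced from their own
 * qp->timeout_jiffies.
 */
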
/**
 * schedule_send_yield - test for a yield required for QP send engine
 * @qp: a pointer to QP
 * @ps: a pointer to a structure with commonly looked up values for
 *      the send engine progress
 *
 * This routine checks if the time slice for the QP has expired
 * for RC QPs, if so an additional work entry is queued. At this
 * point, other QPs have an opportunity to be scheduled. It
 * returns true if a yield is required, otherwise, false.
 */
static bool schedule_send_yield(struct rvt_qp *qp,
                                struct hfi1_pkt_state *ps)
{
        ps->pkts_sent = true;

        if (unlikely(time_after(jiffies, ps->timeout))) {
                if (!ps->in_thread ||
                    workqueue_congested(ps->cpu, ps->ppd->hfi1_wq)) {
                        spin_lock_irqsave(&qp->s_lock, ps->flags);
                        qp->s_flags &= ~RVT_S_BUSY;
                        hfi1_schedule_send(qp);
                        spin_unlock_irqrestore(&qp->s_lock, ps->flags);
                        this_cpu_inc(*ps->ppd->dd->send_schedule);
                        trace_hfi1_rc_expired_time_slice(qp, true);
                        return true;
                }

                cond_resched();
                this_cpu_inc(*ps->ppd->dd->send_schedule);
                ps->timeout = jiffies + ps->timeout_int;
        }

        trace_hfi1_rc_expired_time_slice(qp, false);
        return false;
}

void hfi1_do_send_from_rvt(struct rvt_qp *qp)
{
        hfi1_do_send(qp, false);
}

void _hfi1_do_send(struct work_struct *work)
{
        struct iowait *wait = container_of(work, struct iowait, iowork);
        struct rvt_qp *qp = iowait_to_qp(wait);

        hfi1_do_send(qp, true);
}

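/*
 * Two entry points funnel into hfi1_do_send(): hfi1_do_send_from_rvt() gives
 * rdmavt a direct-call entry (in_thread = false), while _hfi1_do_send() is
 * the iowait work item run from the send workqueue (in_thread = true);
 * schedule_send_yield() uses ps->in_thread to decide between a simple
 * cond_resched() and a full requeue of the QP.
 */
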
/**
 * hfi1_do_send - perform a send on a QP
 * @qp: a pointer to the QP
 * @in_thread: true if in a workqueue thread
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP.
 * Otherwise, two threads could send packets out of order.
 */
void hfi1_do_send(struct rvt_qp *qp, bool in_thread)
{
        struct hfi1_pkt_state ps;
        struct hfi1_qp_priv *priv = qp->priv;
        int (*make_req)(struct rvt_qp *qp, struct hfi1_pkt_state *ps);

        ps.dev = to_idev(qp->ibqp.device);
        ps.ibp = to_iport(qp->ibqp.device, qp->port_num);
        ps.ppd = ppd_from_ibp(ps.ibp);
        ps.in_thread = in_thread;

        trace_hfi1_rc_do_send(qp, in_thread);

        switch (qp->ibqp.qp_type) {
        case IB_QPT_RC:
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_rc_req;
                ps.timeout_int = qp->timeout_jiffies;
                break;
        case IB_QPT_UC:
                if (!loopback && ((rdma_ah_get_dlid(&qp->remote_ah_attr) &
                                   ~((1 << ps.ppd->lmc) - 1)) ==
                                  ps.ppd->lid)) {
                        ruc_loopback(qp);
                        return;
                }
                make_req = hfi1_make_uc_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
                break;
        default:
                make_req = hfi1_make_ud_req;
                ps.timeout_int = SEND_RESCHED_TIMEOUT;
        }

        spin_lock_irqsave(&qp->s_lock, ps.flags);

        /* Return if we are already busy processing a work request. */
        if (!hfi1_send_ok(qp)) {
                spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                return;
        }

        qp->s_flags |= RVT_S_BUSY;

        ps.timeout_int = ps.timeout_int / 8;
        ps.timeout = jiffies + ps.timeout_int;
        ps.cpu = priv->s_sde ? priv->s_sde->cpu :
                        cpumask_first(cpumask_of_node(ps.ppd->dd->node));
        ps.pkts_sent = false;

        /* ensure a pre-built packet is handled */
        ps.s_txreq = get_waiting_verbs_txreq(qp);
        do {
                /* Check for a constructed packet to be sent. */
                if (ps.s_txreq) {
                        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
                        /*
                         * If the packet cannot be sent now, return and
                         * the send engine will be woken up later.
                         */
                        if (hfi1_verbs_send(qp, &ps))
                                return;
                        /* allow other tasks to run */
                        if (schedule_send_yield(qp, &ps))
                                return;

                        spin_lock_irqsave(&qp->s_lock, ps.flags);
                }
        } while (make_req(qp, &ps));
        iowait_starve_clear(ps.pkts_sent, &priv->s_iowait);
        spin_unlock_irqrestore(&qp->s_lock, ps.flags);
}

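/*
 * Retire a send WQE: advance s_last past it, drop the AH reference for
 * UD/SMI/GSI WQEs, generate the completion, and pull any other ring index
 * (s_acked, s_cur, s_tail) forward if it was parked on the retired slot.
 */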
/*
 * This should be called with s_lock held.
 */
void hfi1_send_complete(struct rvt_qp *qp, struct rvt_swqe *wqe,
                        enum ib_wc_status status)
{
        u32 old_last, last;

        if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_OR_FLUSH_SEND))
                return;

        last = qp->s_last;
        old_last = last;
        trace_hfi1_qp_send_completion(qp, wqe, last);
        if (++last >= qp->s_size)
                last = 0;
        trace_hfi1_qp_send_completion(qp, wqe, last);
        /* See post_send() */
        barrier();
        qp->s_last = last;
        if (qp->ibqp.qp_type == IB_QPT_UD ||
            qp->ibqp.qp_type == IB_QPT_SMI ||
            qp->ibqp.qp_type == IB_QPT_GSI)
                atomic_dec(&ibah_to_rvtah(wqe->ud_wr.ah)->refcount);

        rvt_qp_swqe_complete(qp,
                             wqe,
                             ib_hfi1_wc_opcode[wqe->wr.opcode],
                             status);

        if (qp->s_acked == old_last)
                qp->s_acked = last;
        if (qp->s_cur == old_last)
                qp->s_cur = last;
        if (qp->s_tail == old_last)
                qp->s_tail = last;
        if (qp->state == IB_QPS_SQD && last == qp->s_cur)
                qp->s_draining = 0;
}