2 * Copyright(c) 2015 - 2018 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
49 #include <rdma/rdma_vt.h>
50 #include <rdma/rdmavt_qp.h>
54 #include "verbs_txreq.h"
57 /* cut down ridiculously long IB macro names */
58 #define OP(x) RC_OP(x)
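/*
 * restart_sge - rewind the SGE state of a WQE to the offset implied by psn
 *
 * The offset is the PSN delta from the WQE's first PSN multiplied by the
 * path MTU.  Returns the number of bytes remaining from that offset to the
 * end of the WQE.
 */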
60 static u32 restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe,
65 len = delta_psn(psn, wqe->psn) * pmtu;
66 ss->sge = wqe->sg_list[0];
67 ss->sg_list = wqe->sg_list + 1;
68 ss->num_sge = wqe->wr.num_sge;
69 ss->total_len = wqe->length;
70 rvt_skip_sge(ss, len, false);
71 return wqe->length - len;
75 * make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
76 * @dev: the device for this QP
77 * @qp: a pointer to the QP
78 * @ohdr: a pointer to the IB header being constructed
79 * @ps: the xmit packet state
81 * Return 1 if constructed; otherwise, return 0.
82 * Note that we are on the responder's side of the QP context.
83 * Note the QP s_lock must be held.
85 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
86 struct ib_other_headers *ohdr,
87 struct hfi1_pkt_state *ps)
89 struct rvt_ack_entry *e;
96 struct hfi1_qp_priv *priv = qp->priv;
98 lockdep_assert_held(&qp->s_lock);
99 /* Don't send an ACK if we aren't supposed to. */
100 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
103 if (priv->hdr_type == HFI1_PKT_TYPE_9B)
104 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
107 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
110 switch (qp->s_ack_state) {
111 case OP(RDMA_READ_RESPONSE_LAST):
112 case OP(RDMA_READ_RESPONSE_ONLY):
113 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
114 if (e->rdma_sge.mr) {
115 rvt_put_mr(e->rdma_sge.mr);
116 e->rdma_sge.mr = NULL;
119 case OP(ATOMIC_ACKNOWLEDGE):
121 * We can increment the tail pointer now that the last
122 * response has been sent instead of only being constructed.
125 if (++qp->s_tail_ack_queue > HFI1_MAX_RDMA_ATOMIC)
126 qp->s_tail_ack_queue = 0;
129 case OP(ACKNOWLEDGE):
130 /* Check for no next entry in the queue. */
131 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
132 if (qp->s_flags & RVT_S_ACK_PENDING)
137 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
138 if (e->opcode == OP(RDMA_READ_REQUEST)) {
140 * If an RDMA read response is being resent and
141 * we haven't seen the duplicate request yet,
142 * then stop sending the remaining responses the
143 * responder has seen until the requester re-sends it.
145 len = e->rdma_sge.sge_length;
146 if (len && !e->rdma_sge.mr) {
147 qp->s_tail_ack_queue = qp->r_head_ack_queue;
150 /* Copy SGE state in case we need to resend */
151 ps->s_txreq->mr = e->rdma_sge.mr;
153 rvt_get_mr(ps->s_txreq->mr);
154 qp->s_ack_rdma_sge.sge = e->rdma_sge;
155 qp->s_ack_rdma_sge.num_sge = 1;
156 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
159 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
161 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
164 ohdr->u.aeth = rvt_compute_aeth(qp);
166 qp->s_ack_rdma_psn = e->psn;
167 bth2 = mask_psn(qp->s_ack_rdma_psn++);
169 /* COMPARE_SWAP or FETCH_ADD */
170 ps->s_txreq->ss = NULL;
172 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
173 ohdr->u.at.aeth = rvt_compute_aeth(qp);
174 ib_u64_put(e->atomic_data, &ohdr->u.at.atomic_ack_eth);
175 hwords += sizeof(ohdr->u.at) / sizeof(u32);
176 bth2 = mask_psn(e->psn);
179 bth0 = qp->s_ack_state << 24;
182 case OP(RDMA_READ_RESPONSE_FIRST):
183 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
185 case OP(RDMA_READ_RESPONSE_MIDDLE):
186 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
187 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
189 rvt_get_mr(ps->s_txreq->mr);
190 len = qp->s_ack_rdma_sge.sge.sge_length;
193 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
195 ohdr->u.aeth = rvt_compute_aeth(qp);
197 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
198 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
201 bth0 = qp->s_ack_state << 24;
202 bth2 = mask_psn(qp->s_ack_rdma_psn++);
208 * Send a regular ACK.
209 * Set the s_ack_state so we wait until after sending
210 * the ACK before setting s_ack_state to ACKNOWLEDGE
213 qp->s_ack_state = OP(SEND_ONLY);
214 qp->s_flags &= ~RVT_S_ACK_PENDING;
215 ps->s_txreq->ss = NULL;
218 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
220 IB_AETH_CREDIT_SHIFT));
222 ohdr->u.aeth = rvt_compute_aeth(qp);
225 bth0 = OP(ACKNOWLEDGE) << 24;
226 bth2 = mask_psn(qp->s_ack_psn);
228 qp->s_rdma_ack_cnt++;
229 ps->s_txreq->sde = priv->s_sde;
230 ps->s_txreq->s_cur_size = len;
231 ps->s_txreq->hdr_dwords = hwords;
232 hfi1_make_ruc_header(qp, ohdr, bth0, bth2, middle, ps);
236 qp->s_ack_state = OP(ACKNOWLEDGE);
238 * Ensure s_rdma_ack_cnt changes are committed prior to resetting
242 qp->s_flags &= ~(RVT_S_RESP_PENDING
249 * hfi1_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
250 * @qp: a pointer to the QP
252 * Assumes s_lock is held.
254 * Return 1 if constructed; otherwise, return 0.
256 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
258 struct hfi1_qp_priv *priv = qp->priv;
259 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
260 struct ib_other_headers *ohdr;
261 struct rvt_sge_state *ss;
262 struct rvt_swqe *wqe;
272 lockdep_assert_held(&qp->s_lock);
273 ps->s_txreq = get_txreq(ps->dev, qp);
277 if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
278 /* header size in 32-bit words LRH+BTH = (8+12)/4. */
280 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
281 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.l.oth;
283 ohdr = &ps->s_txreq->phdr.hdr.ibh.u.oth;
285 /* header size in 32-bit words 16B LRH+BTH = (16+12)/4. */
287 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
288 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
289 ohdr = &ps->s_txreq->phdr.hdr.opah.u.l.oth;
291 ohdr = &ps->s_txreq->phdr.hdr.opah.u.oth;
294 /* Sending responses takes priority over sending requests. */
295 if ((qp->s_flags & RVT_S_RESP_PENDING) &&
296 make_rc_ack(dev, qp, ohdr, ps))
299 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
300 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
302 /* We are in the error state, flush the work request. */
303 if (qp->s_last == READ_ONCE(qp->s_head))
305 /* If DMAs are in progress, we can't flush immediately. */
306 if (iowait_sdma_pending(&priv->s_iowait)) {
307 qp->s_flags |= RVT_S_WAIT_DMA;
311 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
312 hfi1_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
313 IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
314 /* will get called again */
318 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK))
321 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
322 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
323 qp->s_flags |= RVT_S_WAIT_PSN;
326 qp->s_sending_psn = qp->s_psn;
327 qp->s_sending_hpsn = qp->s_psn - 1;
330 /* Send a request. */
331 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
332 switch (qp->s_state) {
334 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
337 * Resend an old request or start a new one.
339 * We keep track of the current SWQE so that
340 * we don't reset the "furthest progress" state
341 * if we need to back up.
344 if (qp->s_cur == qp->s_tail) {
345 /* Check if send work queue is empty. */
346 if (qp->s_tail == READ_ONCE(qp->s_head)) {
351 * If a fence is requested, wait for previous
352 * RDMA read and atomic operations to finish.
354 if ((wqe->wr.send_flags & IB_SEND_FENCE) &&
355 qp->s_num_rd_atomic) {
356 qp->s_flags |= RVT_S_WAIT_FENCE;
360 * Local operations are processed immediately
361 * after all prior requests have completed
363 if (wqe->wr.opcode == IB_WR_REG_MR ||
364 wqe->wr.opcode == IB_WR_LOCAL_INV) {
368 if (qp->s_last != qp->s_cur)
370 if (++qp->s_cur == qp->s_size)
372 if (++qp->s_tail == qp->s_size)
374 if (!(wqe->wr.send_flags &
375 RVT_SEND_COMPLETION_ONLY)) {
376 err = rvt_invalidate_rkey(
378 wqe->wr.ex.invalidate_rkey);
381 hfi1_send_complete(qp, wqe,
382 err ? IB_WC_LOC_PROT_ERR
385 atomic_dec(&qp->local_ops_pending);
390 qp->s_psn = wqe->psn;
393 * Note that we have to be careful not to modify the
394 * original work request since we may need to resend
399 bth2 = mask_psn(qp->s_psn);
400 switch (wqe->wr.opcode) {
402 case IB_WR_SEND_WITH_IMM:
403 case IB_WR_SEND_WITH_INV:
404 /* If no credit, return. */
405 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
406 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
407 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
411 qp->s_state = OP(SEND_FIRST);
415 if (wqe->wr.opcode == IB_WR_SEND) {
416 qp->s_state = OP(SEND_ONLY);
417 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
418 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
419 /* Immediate data comes after the BTH */
420 ohdr->u.imm_data = wqe->wr.ex.imm_data;
423 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
424 /* Invalidate rkey comes after the BTH */
425 ohdr->u.ieth = cpu_to_be32(
426 wqe->wr.ex.invalidate_rkey);
429 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
430 bth0 |= IB_BTH_SOLICITED;
431 bth2 |= IB_BTH_REQ_ACK;
432 if (++qp->s_cur == qp->s_size)
436 case IB_WR_RDMA_WRITE:
437 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
439 goto no_flow_control;
440 case IB_WR_RDMA_WRITE_WITH_IMM:
441 /* If no credit, return. */
442 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT) &&
443 rvt_cmp_msn(wqe->ssn, qp->s_lsn + 1) > 0) {
444 qp->s_flags |= RVT_S_WAIT_SSN_CREDIT;
449 wqe->rdma_wr.remote_addr,
451 ohdr->u.rc.reth.rkey =
452 cpu_to_be32(wqe->rdma_wr.rkey);
453 ohdr->u.rc.reth.length = cpu_to_be32(len);
454 hwords += sizeof(struct ib_reth) / sizeof(u32);
456 qp->s_state = OP(RDMA_WRITE_FIRST);
460 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
461 qp->s_state = OP(RDMA_WRITE_ONLY);
464 OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
465 /* Immediate data comes after RETH */
466 ohdr->u.rc.imm_data = wqe->wr.ex.imm_data;
468 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
469 bth0 |= IB_BTH_SOLICITED;
471 bth2 |= IB_BTH_REQ_ACK;
472 if (++qp->s_cur == qp->s_size)
476 case IB_WR_RDMA_READ:
478 * Don't allow more operations to be started
479 * than the QP limits allow.
482 if (qp->s_num_rd_atomic >=
483 qp->s_max_rd_atomic) {
484 qp->s_flags |= RVT_S_WAIT_RDMAR;
487 qp->s_num_rd_atomic++;
488 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
492 wqe->rdma_wr.remote_addr,
494 ohdr->u.rc.reth.rkey =
495 cpu_to_be32(wqe->rdma_wr.rkey);
496 ohdr->u.rc.reth.length = cpu_to_be32(len);
497 qp->s_state = OP(RDMA_READ_REQUEST);
498 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
501 bth2 |= IB_BTH_REQ_ACK;
502 if (++qp->s_cur == qp->s_size)
506 case IB_WR_ATOMIC_CMP_AND_SWP:
507 case IB_WR_ATOMIC_FETCH_AND_ADD:
509 * Don't allow more operations to be started
510 * than the QP limits allow.
513 if (qp->s_num_rd_atomic >=
514 qp->s_max_rd_atomic) {
515 qp->s_flags |= RVT_S_WAIT_RDMAR;
518 qp->s_num_rd_atomic++;
519 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
522 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
523 qp->s_state = OP(COMPARE_SWAP);
524 put_ib_ateth_swap(wqe->atomic_wr.swap,
525 &ohdr->u.atomic_eth);
526 put_ib_ateth_compare(wqe->atomic_wr.compare_add,
527 &ohdr->u.atomic_eth);
529 qp->s_state = OP(FETCH_ADD);
530 put_ib_ateth_swap(wqe->atomic_wr.compare_add,
531 &ohdr->u.atomic_eth);
532 put_ib_ateth_compare(0, &ohdr->u.atomic_eth);
534 put_ib_ateth_vaddr(wqe->atomic_wr.remote_addr,
535 &ohdr->u.atomic_eth);
536 ohdr->u.atomic_eth.rkey = cpu_to_be32(
537 wqe->atomic_wr.rkey);
538 hwords += sizeof(struct ib_atomic_eth) / sizeof(u32);
541 bth2 |= IB_BTH_REQ_ACK;
542 if (++qp->s_cur == qp->s_size)
549 qp->s_sge.sge = wqe->sg_list[0];
550 qp->s_sge.sg_list = wqe->sg_list + 1;
551 qp->s_sge.num_sge = wqe->wr.num_sge;
552 qp->s_sge.total_len = wqe->length;
553 qp->s_len = wqe->length;
556 if (qp->s_tail >= qp->s_size)
559 if (wqe->wr.opcode == IB_WR_RDMA_READ)
560 qp->s_psn = wqe->lpsn + 1;
565 case OP(RDMA_READ_RESPONSE_FIRST):
567 * qp->s_state is normally set to the opcode of the
568 * last packet constructed for new requests and therefore
569 * is never set to RDMA read response.
570 * RDMA_READ_RESPONSE_FIRST is used by the ACK processing
571 * thread to indicate a SEND needs to be restarted from an
572 * earlier PSN without interfering with the sending thread.
575 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
578 qp->s_state = OP(SEND_MIDDLE);
580 case OP(SEND_MIDDLE):
581 bth2 = mask_psn(qp->s_psn++);
586 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
589 if (wqe->wr.opcode == IB_WR_SEND) {
590 qp->s_state = OP(SEND_LAST);
591 } else if (wqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
592 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
593 /* Immediate data comes after the BTH */
594 ohdr->u.imm_data = wqe->wr.ex.imm_data;
597 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
598 /* Invalidate rkey comes after the BTH */
599 ohdr->u.ieth = cpu_to_be32(wqe->wr.ex.invalidate_rkey);
602 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
603 bth0 |= IB_BTH_SOLICITED;
604 bth2 |= IB_BTH_REQ_ACK;
606 if (qp->s_cur >= qp->s_size)
610 case OP(RDMA_READ_RESPONSE_LAST):
612 * qp->s_state is normally set to the opcode of the
613 * last packet constructed for new requests and therefore
614 * is never set to RDMA read response.
615 * RDMA_READ_RESPONSE_LAST is used by the ACK processing
616 * thread to indicate a RDMA write needs to be restarted from
617 * an earlier PSN without interfering with the sending thread.
620 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
622 case OP(RDMA_WRITE_FIRST):
623 qp->s_state = OP(RDMA_WRITE_MIDDLE);
625 case OP(RDMA_WRITE_MIDDLE):
626 bth2 = mask_psn(qp->s_psn++);
631 middle = HFI1_CAP_IS_KSET(SDMA_AHG);
634 if (wqe->wr.opcode == IB_WR_RDMA_WRITE) {
635 qp->s_state = OP(RDMA_WRITE_LAST);
637 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
638 /* Immediate data comes after the BTH */
639 ohdr->u.imm_data = wqe->wr.ex.imm_data;
641 if (wqe->wr.send_flags & IB_SEND_SOLICITED)
642 bth0 |= IB_BTH_SOLICITED;
644 bth2 |= IB_BTH_REQ_ACK;
646 if (qp->s_cur >= qp->s_size)
650 case OP(RDMA_READ_RESPONSE_MIDDLE):
652 * qp->s_state is normally set to the opcode of the
653 * last packet constructed for new requests and therefore
654 * is never set to RDMA read response.
655 * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing
656 * thread to indicate a RDMA read needs to be restarted from
657 * an earlier PSN without interfering with the sending thread.
660 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
662 wqe->rdma_wr.remote_addr + len,
664 ohdr->u.rc.reth.rkey =
665 cpu_to_be32(wqe->rdma_wr.rkey);
666 ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len);
667 qp->s_state = OP(RDMA_READ_REQUEST);
668 hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32);
669 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
670 qp->s_psn = wqe->lpsn + 1;
674 if (qp->s_cur == qp->s_size)
678 qp->s_sending_hpsn = bth2;
679 delta = delta_psn(bth2, wqe->psn);
680 if (delta && delta % HFI1_PSN_CREDIT == 0)
681 bth2 |= IB_BTH_REQ_ACK;
682 if (qp->s_flags & RVT_S_SEND_ONE) {
683 qp->s_flags &= ~RVT_S_SEND_ONE;
684 qp->s_flags |= RVT_S_WAIT_ACK;
685 bth2 |= IB_BTH_REQ_ACK;
688 ps->s_txreq->hdr_dwords = hwords;
689 ps->s_txreq->sde = priv->s_sde;
690 ps->s_txreq->ss = ss;
691 ps->s_txreq->s_cur_size = len;
692 hfi1_make_ruc_header(
695 bth0 | (qp->s_state << 24),
702 hfi1_put_txreq(ps->s_txreq);
707 hfi1_put_txreq(ps->s_txreq);
711 qp->s_flags &= ~RVT_S_BUSY;
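/*
 * hfi1_make_bth_aeth - fill in the BTH and AETH of an outgoing ACK/NAK
 *
 * The AETH carries either the NAK code or the credit/MSN information from
 * rvt_compute_aeth(); the BTH carries the remote QPN and the PSN being
 * acknowledged (r_ack_psn).
 */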
715 static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
716 struct ib_other_headers *ohdr,
720 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
722 IB_AETH_CREDIT_SHIFT));
724 ohdr->u.aeth = rvt_compute_aeth(qp);
726 ohdr->bth[0] = cpu_to_be32(bth0);
727 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
728 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
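/*
 * hfi1_queue_rc_ack - defer an RC ACK/NAK to the send engine
 *
 * Latch the NAK state and the PSN to acknowledge, mark the response as
 * pending (RVT_S_ACK_PENDING | RVT_S_RESP_PENDING), and schedule the send
 * engine to emit it.
 */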
731 static inline void hfi1_queue_rc_ack(struct hfi1_packet *packet, bool is_fecn)
733 struct rvt_qp *qp = packet->qp;
734 struct hfi1_ibport *ibp;
737 spin_lock_irqsave(&qp->s_lock, flags);
738 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
740 ibp = rcd_to_iport(packet->rcd);
741 this_cpu_inc(*ibp->rvp.rc_qacks);
742 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
743 qp->s_nak_state = qp->r_nak_state;
744 qp->s_ack_psn = qp->r_ack_psn;
746 qp->s_flags |= RVT_S_ECN;
748 /* Schedule the send tasklet. */
749 hfi1_schedule_send(qp);
751 spin_unlock_irqrestore(&qp->s_lock, flags);
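/*
 * hfi1_make_rc_ack_9B - build the 9B (IB) headers for an RC ACK
 *
 * Fill in the LRH, optional GRH, and BTH/AETH in *opa_hdr, set the PBC
 * DC_INFO bit from SC[4], and return the header length in *hwords.
 */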
754 static inline void hfi1_make_rc_ack_9B(struct hfi1_packet *packet,
755 struct hfi1_opa_header *opa_hdr,
756 u8 sc5, bool is_fecn,
757 u64 *pbc_flags, u32 *hwords,
760 struct rvt_qp *qp = packet->qp;
761 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
762 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
763 struct ib_header *hdr = &opa_hdr->ibh;
764 struct ib_other_headers *ohdr;
765 u16 lrh0 = HFI1_LRH_BTH;
769 opa_hdr->hdr_type = HFI1_PKT_TYPE_9B;
771 /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4 */
774 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
775 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
776 rdma_ah_read_grh(&qp->remote_ah_attr),
777 *hwords - 2, SIZE_OF_CRC);
778 ohdr = &hdr->u.l.oth;
781 /* set PBC_DC_INFO bit (aka SC[4]) in pbc_flags */
782 *pbc_flags |= ((!!(sc5 & 0x10)) << PBC_DC_INFO_SHIFT);
784 /* read pkey_index w/o lock (it's atomic) */
785 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
787 lrh0 |= (sc5 & IB_SC_MASK) << IB_SC_SHIFT |
788 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
791 hfi1_make_ib_hdr(hdr, lrh0, *hwords + SIZE_OF_CRC,
792 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
793 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
795 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
796 if (qp->s_mig_state == IB_MIG_MIGRATED)
797 bth0 |= IB_BTH_MIG_REQ;
798 bth1 = (!!is_fecn) << IB_BECN_SHIFT;
799 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
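/*
 * hfi1_make_rc_ack_16B - build the 16B (OPA) headers for an RC ACK
 *
 * Same role as hfi1_make_rc_ack_9B but for the 16B packet format: compute
 * the pad/LT/ICRC accounting, build the 16B LRH plus optional GRH and
 * BTH/AETH, and request bypass PBC handling.
 */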
802 static inline void hfi1_make_rc_ack_16B(struct hfi1_packet *packet,
803 struct hfi1_opa_header *opa_hdr,
804 u8 sc5, bool is_fecn,
805 u64 *pbc_flags, u32 *hwords,
808 struct rvt_qp *qp = packet->qp;
809 struct hfi1_ibport *ibp = rcd_to_iport(packet->rcd);
810 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
811 struct hfi1_16b_header *hdr = &opa_hdr->opah;
812 struct ib_other_headers *ohdr;
816 u8 l4 = OPA_16B_L4_IB_LOCAL;
819 opa_hdr->hdr_type = HFI1_PKT_TYPE_16B;
821 /* header size in 32-bit words 16B LRH+BTH+AETH = (16+12+4)/4 */
823 extra_bytes = hfi1_get_16b_padding(*hwords << 2, 0);
824 *nwords = SIZE_OF_CRC + ((extra_bytes + SIZE_OF_LT) >> 2);
826 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
827 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
828 *hwords += hfi1_make_grh(ibp, &hdr->u.l.grh,
829 rdma_ah_read_grh(&qp->remote_ah_attr),
830 *hwords - 4, *nwords);
831 ohdr = &hdr->u.l.oth;
832 l4 = OPA_16B_L4_IB_GLOBAL;
834 *pbc_flags |= PBC_PACKET_BYPASS | PBC_INSERT_BYPASS_ICRC;
836 /* read pkey_index w/o lock (it's atomic) */
837 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
839 /* Convert dwords to flits */
840 len = (*hwords + *nwords) >> 1;
842 hfi1_make_16b_hdr(hdr, ppd->lid |
843 (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
844 ((1 << ppd->lmc) - 1)),
845 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
846 16B), len, pkey, becn, 0, l4, sc5);
848 bth0 = pkey | (OP(ACKNOWLEDGE) << 24);
849 bth0 |= extra_bytes << 20;
850 if (qp->s_mig_state == IB_MIG_MIGRATED)
851 bth1 = OPA_BTH_MIG_REQ;
852 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
855 typedef void (*hfi1_make_rc_ack)(struct hfi1_packet *packet,
856 struct hfi1_opa_header *opa_hdr,
857 u8 sc5, bool is_fecn,
858 u64 *pbc_flags, u32 *hwords,
861 /* We support only two types - 9B and 16B for now */
862 static const hfi1_make_rc_ack hfi1_make_rc_ack_tbl[2] = {
863 [HFI1_PKT_TYPE_9B] = &hfi1_make_rc_ack_9B,
864 [HFI1_PKT_TYPE_16B] = &hfi1_make_rc_ack_16B
868 * hfi1_send_rc_ack - Construct an ACK packet and send it
869 * @qp: a pointer to the QP
871 * This is called from hfi1_rc_rcv() and handle_receive_interrupt().
872 * Note that RDMA reads and atomics are handled in the
873 * send side QP state and send engine.
875 void hfi1_send_rc_ack(struct hfi1_packet *packet, bool is_fecn)
877 struct hfi1_ctxtdata *rcd = packet->rcd;
878 struct rvt_qp *qp = packet->qp;
879 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
880 struct hfi1_qp_priv *priv = qp->priv;
881 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
882 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
883 u64 pbc, pbc_flags = 0;
887 struct pio_buf *pbuf;
888 struct hfi1_opa_header opa_hdr;
890 /* clear the defer count */
893 /* Don't send ACK or NAK if an RDMA read or atomic is pending. */
894 if (qp->s_flags & RVT_S_RESP_PENDING) {
895 hfi1_queue_rc_ack(packet, is_fecn);
899 /* Ensure s_rdma_ack_cnt changes are committed */
900 if (qp->s_rdma_ack_cnt) {
901 hfi1_queue_rc_ack(packet, is_fecn);
905 /* Don't try to send ACKs if the link isn't ACTIVE */
906 if (driver_lstate(ppd) != IB_PORT_ACTIVE)
909 /* Make the appropriate header */
910 hfi1_make_rc_ack_tbl[priv->hdr_type](packet, &opa_hdr, sc5, is_fecn,
911 &pbc_flags, &hwords, &nwords);
913 plen = 2 /* PBC */ + hwords + nwords;
914 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
915 sc_to_vlt(ppd->dd, sc5), plen);
916 pbuf = sc_buffer_alloc(rcd->sc, plen, NULL, NULL);
917 if (IS_ERR_OR_NULL(pbuf)) {
919 * We have no room to send at the moment. Pass
920 * responsibility for sending the ACK to the send engine
921 * so that when enough buffer space becomes available,
922 * the ACK is sent ahead of other outgoing packets.
924 hfi1_queue_rc_ack(packet, is_fecn);
927 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
928 &opa_hdr, ib_is_sc5(sc5));
930 /* write the pbc and data */
931 ppd->dd->pio_inline_send(ppd->dd, pbuf, pbc,
932 (priv->hdr_type == HFI1_PKT_TYPE_9B ?
933 (void *)&opa_hdr.ibh :
934 (void *)&opa_hdr.opah), hwords);
939 * reset_psn - reset the QP state to send starting from PSN
941 * @psn: the packet sequence number to restart at
943 * This is called from hfi1_rc_rcv() to process an incoming RC ACK
945 * Called at interrupt level with the QP s_lock held.
947 static void reset_psn(struct rvt_qp *qp, u32 psn)
950 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
953 lockdep_assert_held(&qp->s_lock);
957 * If we are starting the request from the beginning,
958 * let the normal send code handle initialization.
960 if (cmp_psn(psn, wqe->psn) <= 0) {
961 qp->s_state = OP(SEND_LAST);
965 /* Find the work request opcode corresponding to the given PSN. */
966 opcode = wqe->wr.opcode;
970 if (++n == qp->s_size)
974 wqe = rvt_get_swqe_ptr(qp, n);
975 diff = cmp_psn(psn, wqe->psn);
980 * If we are starting the request from the beginning,
981 * let the normal send code handle initialization.
984 qp->s_state = OP(SEND_LAST);
987 opcode = wqe->wr.opcode;
991 * Set the state to restart in the middle of a request.
992 * Don't change the s_sge, s_cur_sge, or s_cur_size.
993 * See hfi1_make_rc_req().
997 case IB_WR_SEND_WITH_IMM:
998 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
1001 case IB_WR_RDMA_WRITE:
1002 case IB_WR_RDMA_WRITE_WITH_IMM:
1003 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
1006 case IB_WR_RDMA_READ:
1007 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
1012 * This case shouldn't happen since it's only
1015 qp->s_state = OP(SEND_LAST);
1020 * Set RVT_S_WAIT_PSN as rc_complete() may start the timer
1021 * asynchronously before the send engine can get scheduled.
1022 * Doing it in hfi1_make_rc_req() is too late.
1024 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
1025 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
1026 qp->s_flags |= RVT_S_WAIT_PSN;
1027 qp->s_flags &= ~HFI1_S_AHG_VALID;
1031 * Back up requester to resend the last un-ACKed request.
1032 * The QP r_lock and s_lock should be held and interrupts disabled.
1034 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
1036 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1037 struct hfi1_ibport *ibp;
1039 lockdep_assert_held(&qp->r_lock);
1040 lockdep_assert_held(&qp->s_lock);
1041 if (qp->s_retry == 0) {
1042 if (qp->s_mig_state == IB_MIG_ARMED) {
1043 hfi1_migrate_qp(qp);
1044 qp->s_retry = qp->s_retry_cnt;
1045 } else if (qp->s_last == qp->s_acked) {
1046 hfi1_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR);
1047 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1049 } else { /* need to handle delayed completion */
1056 ibp = to_iport(qp->ibqp.device, qp->port_num);
1057 if (wqe->wr.opcode == IB_WR_RDMA_READ)
1058 ibp->rvp.n_rc_resends++;
1060 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1062 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
1063 RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_PSN |
1066 qp->s_flags |= RVT_S_SEND_ONE;
1071 * Set qp->s_sending_psn to the next PSN after the given one.
1072 * This would be psn+1 except when RDMA reads are present.
1074 static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
1076 struct rvt_swqe *wqe;
1079 lockdep_assert_held(&qp->s_lock);
1080 /* Find the work request corresponding to the given PSN. */
1082 wqe = rvt_get_swqe_ptr(qp, n);
1083 if (cmp_psn(psn, wqe->lpsn) <= 0) {
1084 if (wqe->wr.opcode == IB_WR_RDMA_READ)
1085 qp->s_sending_psn = wqe->lpsn + 1;
1087 qp->s_sending_psn = psn + 1;
1090 if (++n == qp->s_size)
1092 if (n == qp->s_tail)
1098 * This should be called with the QP s_lock held and interrupts disabled.
1100 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1102 struct ib_other_headers *ohdr;
1103 struct hfi1_qp_priv *priv = qp->priv;
1104 struct rvt_swqe *wqe;
1105 struct ib_header *hdr = NULL;
1106 struct hfi1_16b_header *hdr_16b = NULL;
1110 lockdep_assert_held(&qp->s_lock);
1111 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
1114 /* Find out where the BTH is */
1115 if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
1117 if (ib_get_lnh(hdr) == HFI1_LRH_BTH)
1120 ohdr = &hdr->u.l.oth;
1124 hdr_16b = &opah->opah;
1125 l4 = hfi1_16B_get_l4(hdr_16b);
1126 if (l4 == OPA_16B_L4_IB_LOCAL)
1127 ohdr = &hdr_16b->u.oth;
1129 ohdr = &hdr_16b->u.l.oth;
1132 opcode = ib_bth_get_opcode(ohdr);
1133 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
1134 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
1135 WARN_ON(!qp->s_rdma_ack_cnt);
1136 qp->s_rdma_ack_cnt--;
1140 psn = ib_bth_get_psn(ohdr);
1141 reset_sending_psn(qp, psn);
1144 * Start timer after a packet requesting an ACK has been sent and
1145 * there are still requests that haven't been acked.
1147 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
1149 (RVT_S_TIMER | RVT_S_WAIT_RNR | RVT_S_WAIT_PSN)) &&
1150 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1151 rvt_add_retry_timer(qp);
1153 while (qp->s_last != qp->s_acked) {
1156 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
1157 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1158 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1160 rvt_qp_wqe_unreserve(qp, wqe);
1161 s_last = qp->s_last;
1162 trace_hfi1_qp_send_completion(qp, wqe, s_last);
1163 if (++s_last >= qp->s_size)
1165 qp->s_last = s_last;
1166 /* see post_send() */
1169 rvt_qp_swqe_complete(qp,
1171 ib_hfi1_wc_opcode[wqe->wr.opcode],
1175 * If we were waiting for sends to complete before re-sending,
1176 * and they are now complete, restart sending.
1178 trace_hfi1_sendcomplete(qp, psn);
1179 if (qp->s_flags & RVT_S_WAIT_PSN &&
1180 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1181 qp->s_flags &= ~RVT_S_WAIT_PSN;
1182 qp->s_sending_psn = qp->s_psn;
1183 qp->s_sending_hpsn = qp->s_psn - 1;
1184 hfi1_schedule_send(qp);
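/* Remember the PSN of the last response that has been processed. */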
1188 static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
1190 qp->s_last_psn = psn;
1194 * Generate a SWQE completion.
1195 * This is similar to hfi1_send_complete but has to check to be sure
1196 * that the SGEs are not being referenced if the SWQE is being resent.
1198 static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1199 struct rvt_swqe *wqe,
1200 struct hfi1_ibport *ibp)
1202 lockdep_assert_held(&qp->s_lock);
1204 * Don't decrement refcount and don't generate a
1205 * completion if the SWQE is being resent until the send engine is done with it.
1208 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1209 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1213 rvt_qp_wqe_unreserve(qp, wqe);
1214 s_last = qp->s_last;
1215 trace_hfi1_qp_send_completion(qp, wqe, s_last);
1216 if (++s_last >= qp->s_size)
1218 qp->s_last = s_last;
1219 /* see post_send() */
1221 rvt_qp_swqe_complete(qp,
1223 ib_hfi1_wc_opcode[wqe->wr.opcode],
1226 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1228 this_cpu_inc(*ibp->rvp.rc_delayed_comp);
1230 * If send progress is not running, attempt to progress the SDMA engine.
1233 if (ppd->dd->flags & HFI1_HAS_SEND_DMA) {
1234 struct sdma_engine *engine;
1235 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1238 /* For now use sc to find engine */
1239 sc5 = ibp->sl_to_sc[sl];
1240 engine = qp_to_sdma_engine(qp, sc5);
1241 sdma_engine_progress_schedule(engine);
1245 qp->s_retry = qp->s_retry_cnt;
1246 update_last_psn(qp, wqe->lpsn);
1249 * If we are completing a request which is in the process of
1250 * being resent, we can stop re-sending it since we know the
1251 * responder has already seen it.
1253 if (qp->s_acked == qp->s_cur) {
1254 if (++qp->s_cur >= qp->s_size)
1256 qp->s_acked = qp->s_cur;
1257 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1258 if (qp->s_acked != qp->s_tail) {
1259 qp->s_state = OP(SEND_LAST);
1260 qp->s_psn = wqe->psn;
1263 if (++qp->s_acked >= qp->s_size)
1265 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1267 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1273 * do_rc_ack - process an incoming RC ACK
1274 * @qp: the QP the ACK came in on
1275 * @psn: the packet sequence number of the ACK
1276 * @opcode: the opcode of the request that resulted in the ACK
1278 * This is called from rc_rcv_resp() to process an incoming RC ACK
1280 * May be called at interrupt level, with the QP s_lock held.
1281 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
1283 static int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
1284 u64 val, struct hfi1_ctxtdata *rcd)
1286 struct hfi1_ibport *ibp;
1287 enum ib_wc_status status;
1288 struct rvt_swqe *wqe;
1293 lockdep_assert_held(&qp->s_lock);
1295 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
1296 * requests and implicitly NAK RDMA read and atomic requests issued
1297 * before the NAK'ed request. The MSN won't include the NAK'ed
1298 * request but will include any ACK'ed request(s).
1301 if (aeth >> IB_AETH_NAK_SHIFT)
1303 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1304 ibp = rcd_to_iport(rcd);
1307 * The MSN might be for a later WQE than the PSN indicates so
1308 * only complete WQEs that the PSN finishes.
1310 while ((diff = delta_psn(ack_psn, wqe->lpsn)) >= 0) {
1312 * RDMA_READ_RESPONSE_ONLY is a special case since
1313 * we want to generate completion events for everything
1314 * before the RDMA read, copy the data, then generate
1315 * the completion for the read.
1317 if (wqe->wr.opcode == IB_WR_RDMA_READ &&
1318 opcode == OP(RDMA_READ_RESPONSE_ONLY) &&
1324 * If this request is an RDMA read or atomic, and the ACK is
1325 * for a later operation, this ACK NAKs the RDMA read or
1326 * atomic. In other words, only an RDMA_READ_LAST or ONLY
1327 * can ACK an RDMA read and likewise for atomic ops. Note
1328 * that the NAK case can only happen if relaxed ordering is
1329 * used and requests are sent after an RDMA read or atomic
1330 * is sent but before the response is received.
1332 if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
1333 (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) ||
1334 ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1335 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
1336 (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) {
1337 /* Retry this request. */
1338 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1339 qp->r_flags |= RVT_R_RDMAR_SEQ;
1340 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1341 if (list_empty(&qp->rspwait)) {
1342 qp->r_flags |= RVT_R_RSP_SEND;
1344 list_add_tail(&qp->rspwait,
1345 &rcd->qp_wait_list);
1349 * No need to process the ACK/NAK since we are
1350 * restarting an earlier request.
1354 if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1355 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
1356 u64 *vaddr = wqe->sg_list[0].vaddr;
1359 if (qp->s_num_rd_atomic &&
1360 (wqe->wr.opcode == IB_WR_RDMA_READ ||
1361 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1362 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) {
1363 qp->s_num_rd_atomic--;
1364 /* Restart sending task if fence is complete */
1365 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
1366 !qp->s_num_rd_atomic) {
1367 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
1369 hfi1_schedule_send(qp);
1370 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
1371 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
1373 hfi1_schedule_send(qp);
1376 wqe = do_rc_completion(qp, wqe, ibp);
1377 if (qp->s_acked == qp->s_tail)
1381 switch (aeth >> IB_AETH_NAK_SHIFT) {
1383 this_cpu_inc(*ibp->rvp.rc_acks);
1384 if (qp->s_acked != qp->s_tail) {
1386 * We are expecting more ACKs so
1387 * mod the retry timer.
1389 rvt_mod_retry_timer(qp);
1391 * We can stop re-sending the earlier packets and
1392 * continue with the next packet the receiver wants.
1394 if (cmp_psn(qp->s_psn, psn) <= 0)
1395 reset_psn(qp, psn + 1);
1397 /* No more acks - kill all timers */
1398 rvt_stop_rc_timers(qp);
1399 if (cmp_psn(qp->s_psn, psn) <= 0) {
1400 qp->s_state = OP(SEND_LAST);
1401 qp->s_psn = psn + 1;
1404 if (qp->s_flags & RVT_S_WAIT_ACK) {
1405 qp->s_flags &= ~RVT_S_WAIT_ACK;
1406 hfi1_schedule_send(qp);
1408 rvt_get_credit(qp, aeth);
1409 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1410 qp->s_retry = qp->s_retry_cnt;
1411 update_last_psn(qp, psn);
1414 case 1: /* RNR NAK */
1415 ibp->rvp.n_rnr_naks++;
1416 if (qp->s_acked == qp->s_tail)
1418 if (qp->s_flags & RVT_S_WAIT_RNR)
1420 if (qp->s_rnr_retry == 0) {
1421 status = IB_WC_RNR_RETRY_EXC_ERR;
1424 if (qp->s_rnr_retry_cnt < 7)
1427 /* The last valid PSN is the previous PSN. */
1428 update_last_psn(qp, psn - 1);
1430 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1434 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
1435 rvt_stop_rc_timers(qp);
1436 rvt_add_rnr_timer(qp, aeth);
1440 if (qp->s_acked == qp->s_tail)
1442 /* The last valid PSN is the previous PSN. */
1443 update_last_psn(qp, psn - 1);
1444 switch ((aeth >> IB_AETH_CREDIT_SHIFT) &
1445 IB_AETH_CREDIT_MASK) {
1446 case 0: /* PSN sequence error */
1447 ibp->rvp.n_seq_naks++;
1449 * Back up to the responder's expected PSN.
1450 * Note that we might get a NAK in the middle of an
1451 * RDMA READ response which terminates the RDMA READ.
1454 hfi1_restart_rc(qp, psn, 0);
1455 hfi1_schedule_send(qp);
1458 case 1: /* Invalid Request */
1459 status = IB_WC_REM_INV_REQ_ERR;
1460 ibp->rvp.n_other_naks++;
1463 case 2: /* Remote Access Error */
1464 status = IB_WC_REM_ACCESS_ERR;
1465 ibp->rvp.n_other_naks++;
1468 case 3: /* Remote Operation Error */
1469 status = IB_WC_REM_OP_ERR;
1470 ibp->rvp.n_other_naks++;
1472 if (qp->s_last == qp->s_acked) {
1473 hfi1_send_complete(qp, wqe, status);
1474 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1479 /* Ignore other reserved NAK error codes */
1482 qp->s_retry = qp->s_retry_cnt;
1483 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
1486 default: /* 2: reserved */
1488 /* Ignore reserved NAK codes. */
1491 /* cannot be reached */
1493 rvt_stop_rc_timers(qp);
1498 * We have seen an out of sequence RDMA read middle or last packet.
1499 * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE.
1501 static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
1502 struct hfi1_ctxtdata *rcd)
1504 struct rvt_swqe *wqe;
1506 lockdep_assert_held(&qp->s_lock);
1507 /* Remove QP from retry timer */
1508 rvt_stop_rc_timers(qp);
1510 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1512 while (cmp_psn(psn, wqe->lpsn) > 0) {
1513 if (wqe->wr.opcode == IB_WR_RDMA_READ ||
1514 wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
1515 wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
1517 wqe = do_rc_completion(qp, wqe, ibp);
1520 ibp->rvp.n_rdma_seq++;
1521 qp->r_flags |= RVT_R_RDMAR_SEQ;
1522 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1523 if (list_empty(&qp->rspwait)) {
1524 qp->r_flags |= RVT_R_RSP_SEND;
1526 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1531 * rc_rcv_resp - process an incoming RC response packet
1532 * @packet: data packet information
1534 * This is called from hfi1_rc_rcv() to process an incoming RC response
1535 * packet for the given QP.
1536 * Called at interrupt level.
1538 static void rc_rcv_resp(struct hfi1_packet *packet)
1540 struct hfi1_ctxtdata *rcd = packet->rcd;
1541 void *data = packet->payload;
1542 u32 tlen = packet->tlen;
1543 struct rvt_qp *qp = packet->qp;
1544 struct hfi1_ibport *ibp;
1545 struct ib_other_headers *ohdr = packet->ohdr;
1546 struct rvt_swqe *wqe;
1547 enum ib_wc_status status;
1548 unsigned long flags;
1552 u32 psn = ib_bth_get_psn(packet->ohdr);
1553 u32 pmtu = qp->pmtu;
1554 u16 hdrsize = packet->hlen;
1555 u8 opcode = packet->opcode;
1556 u8 pad = packet->pad;
1557 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
1559 spin_lock_irqsave(&qp->s_lock, flags);
1560 trace_hfi1_ack(qp, psn);
1562 /* Ignore invalid responses. */
1563 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
1566 /* Ignore duplicate responses. */
1567 diff = cmp_psn(psn, qp->s_last_psn);
1568 if (unlikely(diff <= 0)) {
1569 /* Update credits for "ghost" ACKs */
1570 if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
1571 aeth = be32_to_cpu(ohdr->u.aeth);
1572 if ((aeth >> IB_AETH_NAK_SHIFT) == 0)
1573 rvt_get_credit(qp, aeth);
1579 * Skip everything other than the PSN we expect, if we are waiting
1580 * for a reply to a restarted RDMA read or atomic op.
1582 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
1583 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
1585 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
1588 if (unlikely(qp->s_acked == qp->s_tail))
1590 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1591 status = IB_WC_SUCCESS;
1594 case OP(ACKNOWLEDGE):
1595 case OP(ATOMIC_ACKNOWLEDGE):
1596 case OP(RDMA_READ_RESPONSE_FIRST):
1597 aeth = be32_to_cpu(ohdr->u.aeth);
1598 if (opcode == OP(ATOMIC_ACKNOWLEDGE))
1599 val = ib_u64_get(&ohdr->u.at.atomic_ack_eth);
1602 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
1603 opcode != OP(RDMA_READ_RESPONSE_FIRST))
1605 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1606 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1609 * If this is a response to a resent RDMA read, we
1610 * have to be careful to copy the data to the right location.
1613 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1617 case OP(RDMA_READ_RESPONSE_MIDDLE):
1618 /* no AETH, no ACK */
1619 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
1621 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1624 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
1626 if (unlikely(pmtu >= qp->s_rdma_read_len))
1630 * We got a response so update the timeout.
1631 * 4.096 usec. * (1 << qp->timeout)
1633 rvt_mod_retry_timer(qp);
1634 if (qp->s_flags & RVT_S_WAIT_ACK) {
1635 qp->s_flags &= ~RVT_S_WAIT_ACK;
1636 hfi1_schedule_send(qp);
1639 if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE))
1640 qp->s_retry = qp->s_retry_cnt;
1643 * Update the RDMA receive state but do the copy w/o
1644 * holding the locks or blocking interrupts.
1646 qp->s_rdma_read_len -= pmtu;
1647 update_last_psn(qp, psn);
1648 spin_unlock_irqrestore(&qp->s_lock, flags);
1649 hfi1_copy_sge(&qp->s_rdma_read_sge, data, pmtu, false, false);
1652 case OP(RDMA_READ_RESPONSE_ONLY):
1653 aeth = be32_to_cpu(ohdr->u.aeth);
1654 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
1657 * Check that the data size is >= 0 && <= pmtu.
1658 * Remember to account for ICRC (4).
1660 if (unlikely(tlen < (hdrsize + extra_bytes)))
1663 * If this is a response to a resent RDMA read, we
1664 * have to be careful to copy the data to the right location.
1667 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1668 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
1672 case OP(RDMA_READ_RESPONSE_LAST):
1673 /* ACKs READ req. */
1674 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
1676 if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ))
1679 * Check that the data size is >= 1 && <= pmtu.
1680 * Remember to account for ICRC (4).
1682 if (unlikely(tlen <= (hdrsize + extra_bytes)))
1685 tlen -= hdrsize + extra_bytes;
1686 if (unlikely(tlen != qp->s_rdma_read_len))
1688 aeth = be32_to_cpu(ohdr->u.aeth);
1689 hfi1_copy_sge(&qp->s_rdma_read_sge, data, tlen, false, false);
1690 WARN_ON(qp->s_rdma_read_sge.num_sge);
1691 (void)do_rc_ack(qp, aeth, psn,
1692 OP(RDMA_READ_RESPONSE_LAST), 0, rcd);
1697 status = IB_WC_LOC_QP_OP_ERR;
1701 ibp = rcd_to_iport(rcd);
1702 rdma_seq_err(qp, ibp, psn, rcd);
1706 status = IB_WC_LOC_LEN_ERR;
1708 if (qp->s_last == qp->s_acked) {
1709 hfi1_send_complete(qp, wqe, status);
1710 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1713 spin_unlock_irqrestore(&qp->s_lock, flags);
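/*
 * rc_defered_ack - defer sending an ACK/NAK for this QP
 *
 * Queue the QP on the receive context's wait list so the response goes out
 * only after the packets already in the receive queue have been processed.
 */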
1718 static inline void rc_defered_ack(struct hfi1_ctxtdata *rcd,
1721 if (list_empty(&qp->rspwait)) {
1722 qp->r_flags |= RVT_R_RSP_NAK;
1724 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
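/* rc_cancel_ack - drop a previously deferred ACK/NAK, if one is queued */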
1728 static inline void rc_cancel_ack(struct rvt_qp *qp)
1731 if (list_empty(&qp->rspwait))
1733 list_del_init(&qp->rspwait);
1734 qp->r_flags &= ~RVT_R_RSP_NAK;
1739 * rc_rcv_error - process an incoming duplicate or error RC packet
1740 * @ohdr: the other headers for this packet
1741 * @data: the packet data
1742 * @qp: the QP for this packet
1743 * @opcode: the opcode for this packet
1744 * @psn: the packet sequence number for this packet
1745 * @diff: the difference between the PSN and the expected PSN
1747 * This is called from hfi1_rc_rcv() to process an unexpected
1748 * incoming RC packet for the given QP.
1749 * Called at interrupt level.
1750 * Return 1 if no more processing is needed; otherwise return 0 to
1751 * schedule a response to be sent.
1753 static noinline int rc_rcv_error(struct ib_other_headers *ohdr, void *data,
1754 struct rvt_qp *qp, u32 opcode, u32 psn,
1755 int diff, struct hfi1_ctxtdata *rcd)
1757 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
1758 struct rvt_ack_entry *e;
1759 unsigned long flags;
1763 trace_hfi1_rcv_error(qp, psn);
1766 * Packet sequence error.
1767 * A NAK will ACK earlier sends and RDMA writes.
1768 * Don't queue the NAK if we already sent one.
1770 if (!qp->r_nak_state) {
1771 ibp->rvp.n_rc_seqnak++;
1772 qp->r_nak_state = IB_NAK_PSN_ERROR;
1773 /* Use the expected PSN. */
1774 qp->r_ack_psn = qp->r_psn;
1776 * Wait to send the sequence NAK until all packets
1777 * in the receive queue have been processed.
1778 * Otherwise, we end up propagating congestion.
1780 rc_defered_ack(rcd, qp);
1786 * Handle a duplicate request. Don't re-execute SEND, RDMA
1787 * write or atomic op. Don't NAK errors, just silently drop
1788 * the duplicate request. Note that r_sge, r_len, and
1789 * r_rcv_len may be in use so don't modify them.
1791 * We are supposed to ACK the earliest duplicate PSN but we
1792 * can coalesce an outstanding duplicate ACK. We have to
1793 * send the earliest so that RDMA reads can be restarted at
1794 * the requester's expected PSN.
1796 * First, find where this duplicate PSN falls within the
1797 * ACKs previously sent.
1798 * old_req is true if there is an older response that is scheduled
1799 * to be sent before sending this one.
1803 ibp->rvp.n_rc_dupreq++;
1805 spin_lock_irqsave(&qp->s_lock, flags);
1807 for (i = qp->r_head_ack_queue; ; i = prev) {
1808 if (i == qp->s_tail_ack_queue)
1813 prev = HFI1_MAX_RDMA_ATOMIC;
1814 if (prev == qp->r_head_ack_queue) {
1818 e = &qp->s_ack_queue[prev];
1823 if (cmp_psn(psn, e->psn) >= 0) {
1824 if (prev == qp->s_tail_ack_queue &&
1825 cmp_psn(psn, e->lpsn) <= 0)
1831 case OP(RDMA_READ_REQUEST): {
1832 struct ib_reth *reth;
1837 * If we didn't find the RDMA read request in the ack queue,
1838 * we can ignore this request.
1840 if (!e || e->opcode != OP(RDMA_READ_REQUEST))
1842 /* RETH comes after BTH */
1843 reth = &ohdr->u.rc.reth;
1845 * Address range must be a subset of the original
1846 * request and start on pmtu boundaries.
1847 * We reuse the old ack_queue slot since the requester
1848 * should not back up and request an earlier PSN for the same request.
1851 offset = delta_psn(psn, e->psn) * qp->pmtu;
1852 len = be32_to_cpu(reth->length);
1853 if (unlikely(offset + len != e->rdma_sge.sge_length))
1855 if (e->rdma_sge.mr) {
1856 rvt_put_mr(e->rdma_sge.mr);
1857 e->rdma_sge.mr = NULL;
1860 u32 rkey = be32_to_cpu(reth->rkey);
1861 u64 vaddr = get_ib_reth_vaddr(reth);
1864 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
1865 IB_ACCESS_REMOTE_READ);
1869 e->rdma_sge.vaddr = NULL;
1870 e->rdma_sge.length = 0;
1871 e->rdma_sge.sge_length = 0;
1876 qp->s_tail_ack_queue = prev;
1880 case OP(COMPARE_SWAP):
1881 case OP(FETCH_ADD): {
1883 * If we didn't find the atomic request in the ack queue
1884 * or the send engine is already backed up to send an
1885 * earlier entry, we can ignore this request.
1887 if (!e || e->opcode != (u8)opcode || old_req)
1889 qp->s_tail_ack_queue = prev;
1895 * Ignore this operation if it doesn't request an ACK
1896 * or an earlier RDMA read or atomic is going to be resent.
1898 if (!(psn & IB_BTH_REQ_ACK) || old_req)
1901 * Resend the most recent ACK if this request is
1902 * after all the previous RDMA reads and atomics.
1904 if (i == qp->r_head_ack_queue) {
1905 spin_unlock_irqrestore(&qp->s_lock, flags);
1906 qp->r_nak_state = 0;
1907 qp->r_ack_psn = qp->r_psn - 1;
1912 * Resend the RDMA read or atomic op which
1913 * ACKs this duplicate request.
1915 qp->s_tail_ack_queue = i;
1918 qp->s_ack_state = OP(ACKNOWLEDGE);
1919 qp->s_flags |= RVT_S_RESP_PENDING;
1920 qp->r_nak_state = 0;
1921 hfi1_schedule_send(qp);
1924 spin_unlock_irqrestore(&qp->s_lock, flags);
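/*
 * update_ack_queue - advance s_tail_ack_queue past entry @n, wrapping within
 * s_ack_queue, and reset s_ack_state for the next response.
 */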
1932 static inline void update_ack_queue(struct rvt_qp *qp, unsigned n)
1937 if (next > HFI1_MAX_RDMA_ATOMIC)
1939 qp->s_tail_ack_queue = next;
1940 qp->s_ack_state = OP(ACKNOWLEDGE);
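/*
 * log_cca_event - record a congestion control (CCA) trigger event
 *
 * Append an entry to the per-port congestion log under cc_log_lock and
 * update the threshold event bookkeeping for the given SL.
 */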
1943 static void log_cca_event(struct hfi1_pportdata *ppd, u8 sl, u32 rlid,
1944 u32 lqpn, u32 rqpn, u8 svc_type)
1946 struct opa_hfi1_cong_log_event_internal *cc_event;
1947 unsigned long flags;
1949 if (sl >= OPA_MAX_SLS)
1952 spin_lock_irqsave(&ppd->cc_log_lock, flags);
1954 ppd->threshold_cong_event_map[sl / 8] |= 1 << (sl % 8);
1955 ppd->threshold_event_counter++;
1957 cc_event = &ppd->cc_events[ppd->cc_log_idx++];
1958 if (ppd->cc_log_idx == OPA_CONG_LOG_ELEMS)
1959 ppd->cc_log_idx = 0;
1960 cc_event->lqpn = lqpn & RVT_QPN_MASK;
1961 cc_event->rqpn = rqpn & RVT_QPN_MASK;
1963 cc_event->svc_type = svc_type;
1964 cc_event->rlid = rlid;
1965 /* keep timestamp in units of 1.024 usec */
1966 cc_event->timestamp = ktime_get_ns() / 1024;
1968 spin_unlock_irqrestore(&ppd->cc_log_lock, flags);
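/*
 * process_becn - react to a BECN (backward explicit congestion notification)
 *
 * Using the congestion control table for @sl, raise the CCTI (bounded by
 * ccti_limit), arm the CCA hrtimer if it is not already running, and log a
 * CCA event once the trigger threshold has been reached.
 */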
1971 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
1972 u32 rqpn, u8 svc_type)
1974 struct cca_timer *cca_timer;
1975 u16 ccti, ccti_incr, ccti_timer, ccti_limit;
1976 u8 trigger_threshold;
1977 struct cc_state *cc_state;
1978 unsigned long flags;
1980 if (sl >= OPA_MAX_SLS)
1983 cc_state = get_cc_state(ppd);
1989 * 1) increase CCTI (for this SL)
1990 * 2) select IPG (i.e., call set_link_ipg())
1993 ccti_limit = cc_state->cct.ccti_limit;
1994 ccti_incr = cc_state->cong_setting.entries[sl].ccti_increase;
1995 ccti_timer = cc_state->cong_setting.entries[sl].ccti_timer;
1997 cc_state->cong_setting.entries[sl].trigger_threshold;
1999 spin_lock_irqsave(&ppd->cca_timer_lock, flags);
2001 cca_timer = &ppd->cca_timer[sl];
2002 if (cca_timer->ccti < ccti_limit) {
2003 if (cca_timer->ccti + ccti_incr <= ccti_limit)
2004 cca_timer->ccti += ccti_incr;
2006 cca_timer->ccti = ccti_limit;
2010 ccti = cca_timer->ccti;
2012 if (!hrtimer_active(&cca_timer->hrtimer)) {
2013 /* ccti_timer is in units of 1.024 usec */
2014 unsigned long nsec = 1024 * ccti_timer;
2016 hrtimer_start(&cca_timer->hrtimer, ns_to_ktime(nsec),
2017 HRTIMER_MODE_REL_PINNED);
2020 spin_unlock_irqrestore(&ppd->cca_timer_lock, flags);
2022 if ((trigger_threshold != 0) && (ccti >= trigger_threshold))
2023 log_cca_event(ppd, sl, rlid, lqpn, rqpn, svc_type);
2027 * hfi1_rc_rcv - process an incoming RC packet
2028 * @packet: data packet information
2030 * This is called from qp_rcv() to process an incoming RC packet
2032 * May be called at interrupt level.
2034 void hfi1_rc_rcv(struct hfi1_packet *packet)
2036 struct hfi1_ctxtdata *rcd = packet->rcd;
2037 void *data = packet->payload;
2038 u32 tlen = packet->tlen;
2039 struct rvt_qp *qp = packet->qp;
2040 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2041 struct ib_other_headers *ohdr = packet->ohdr;
2042 u32 opcode = packet->opcode;
2043 u32 hdrsize = packet->hlen;
2044 u32 psn = ib_bth_get_psn(packet->ohdr);
2045 u32 pad = packet->pad;
2047 u32 pmtu = qp->pmtu;
2049 struct ib_reth *reth;
2050 unsigned long flags;
2052 bool copy_last = false, fecn;
2054 u8 extra_bytes = pad + packet->extra_byte + (SIZE_OF_CRC << 2);
2056 lockdep_assert_held(&qp->r_lock);
2058 if (hfi1_ruc_check_hdr(ibp, packet))
2061 fecn = process_ecn(qp, packet);
2064 * Process responses (ACKs) before anything else. Note that the
2065 * packet sequence number will be for something in the send work
2066 * queue rather than the expected receive packet sequence number.
2067 * In other words, this QP is the requester.
2069 if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
2070 opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
2071 rc_rcv_resp(packet);
2075 /* Compute 24 bits worth of difference. */
2076 diff = delta_psn(psn, qp->r_psn);
2077 if (unlikely(diff)) {
2078 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
2083 /* Check for opcode sequence errors. */
2084 switch (qp->r_state) {
2085 case OP(SEND_FIRST):
2086 case OP(SEND_MIDDLE):
2087 if (opcode == OP(SEND_MIDDLE) ||
2088 opcode == OP(SEND_LAST) ||
2089 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2090 opcode == OP(SEND_LAST_WITH_INVALIDATE))
2094 case OP(RDMA_WRITE_FIRST):
2095 case OP(RDMA_WRITE_MIDDLE):
2096 if (opcode == OP(RDMA_WRITE_MIDDLE) ||
2097 opcode == OP(RDMA_WRITE_LAST) ||
2098 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2103 if (opcode == OP(SEND_MIDDLE) ||
2104 opcode == OP(SEND_LAST) ||
2105 opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
2106 opcode == OP(SEND_LAST_WITH_INVALIDATE) ||
2107 opcode == OP(RDMA_WRITE_MIDDLE) ||
2108 opcode == OP(RDMA_WRITE_LAST) ||
2109 opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
2112 * Note that it is up to the requester to not send a new
2113 * RDMA read or atomic operation before receiving an ACK
2114 * for the previous operation.
2119 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2122 /* OK, process the packet. */
2124 case OP(SEND_FIRST):
2125 ret = rvt_get_rwqe(qp, false);
2132 case OP(SEND_MIDDLE):
2133 case OP(RDMA_WRITE_MIDDLE):
2135 /* Check for invalid length PMTU or posted rwqe len. */
2137 * There will be no padding for 9B packets, but 16B packets
2138 * will come in with some padding since we always add
2139 * CRC and LT bytes, which need to be flit aligned.
2141 if (unlikely(tlen != (hdrsize + pmtu + extra_bytes)))
2143 qp->r_rcv_len += pmtu;
2144 if (unlikely(qp->r_rcv_len > qp->r_len))
2146 hfi1_copy_sge(&qp->r_sge, data, pmtu, true, false);
2149 case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
2151 ret = rvt_get_rwqe(qp, true);
2159 case OP(SEND_ONLY_WITH_IMMEDIATE):
2160 case OP(SEND_ONLY_WITH_INVALIDATE):
2161 ret = rvt_get_rwqe(qp, false);
2167 if (opcode == OP(SEND_ONLY))
2168 goto no_immediate_data;
2169 if (opcode == OP(SEND_ONLY_WITH_INVALIDATE))
2171 /* FALLTHROUGH -- for SEND_ONLY_WITH_IMMEDIATE */
2172 case OP(SEND_LAST_WITH_IMMEDIATE):
2174 wc.ex.imm_data = ohdr->u.imm_data;
2175 wc.wc_flags = IB_WC_WITH_IMM;
2177 case OP(SEND_LAST_WITH_INVALIDATE):
2179 rkey = be32_to_cpu(ohdr->u.ieth);
2180 if (rvt_invalidate_rkey(qp, rkey))
2181 goto no_immediate_data;
2182 wc.ex.invalidate_rkey = rkey;
2183 wc.wc_flags = IB_WC_WITH_INVALIDATE;
2185 case OP(RDMA_WRITE_LAST):
2186 copy_last = rvt_is_user_qp(qp);
2193 /* Check for invalid length. */
2194 /* LAST len should be >= 1 */
2195 if (unlikely(tlen < (hdrsize + extra_bytes)))
2197 /* Don't count the CRC (and padding and LT byte for 16B). */
2198 tlen -= (hdrsize + extra_bytes);
2199 wc.byte_len = tlen + qp->r_rcv_len;
2200 if (unlikely(wc.byte_len > qp->r_len))
2202 hfi1_copy_sge(&qp->r_sge, data, tlen, true, copy_last);
2203 rvt_put_ss(&qp->r_sge);
2205 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
2207 wc.wr_id = qp->r_wr_id;
2208 wc.status = IB_WC_SUCCESS;
2209 if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) ||
2210 opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE))
2211 wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
2213 wc.opcode = IB_WC_RECV;
2215 wc.src_qp = qp->remote_qpn;
2216 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
2218 * It seems that IB mandates the presence of an SL in a
2219 * work completion only for the UD transport (see section
2220 * 11.4.2 of IBTA Vol. 1).
2222 * However, the way the SL is chosen below is consistent
2223 * with the way that IB/qib works and is trying to avoid
2224 * introducing incompatibilities.
2226 * See also OPA Vol. 1, section 9.7.6, and table 9-17.
2228 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
2229 /* zero fields that are N/A */
2232 wc.dlid_path_bits = 0;
2234 /* Signal completion event if the solicited bit is set. */
2235 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.recv_cq), &wc,
2236 ib_bth_is_solicited(ohdr));
2239 case OP(RDMA_WRITE_ONLY):
2240 copy_last = rvt_is_user_qp(qp);
2242 case OP(RDMA_WRITE_FIRST):
2243 case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
2244 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
2247 reth = &ohdr->u.rc.reth;
2248 qp->r_len = be32_to_cpu(reth->length);
2250 qp->r_sge.sg_list = NULL;
2251 if (qp->r_len != 0) {
2252 u32 rkey = be32_to_cpu(reth->rkey);
2253 u64 vaddr = get_ib_reth_vaddr(reth);
2256 /* Check rkey & NAK */
2257 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
2258 rkey, IB_ACCESS_REMOTE_WRITE);
2261 qp->r_sge.num_sge = 1;
2263 qp->r_sge.num_sge = 0;
2264 qp->r_sge.sge.mr = NULL;
2265 qp->r_sge.sge.vaddr = NULL;
2266 qp->r_sge.sge.length = 0;
2267 qp->r_sge.sge.sge_length = 0;
2269 if (opcode == OP(RDMA_WRITE_FIRST))
2271 else if (opcode == OP(RDMA_WRITE_ONLY))
2272 goto no_immediate_data;
2273 ret = rvt_get_rwqe(qp, true);
2277 /* peer will send again */
2278 rvt_put_ss(&qp->r_sge);
2281 wc.ex.imm_data = ohdr->u.rc.imm_data;
2282 wc.wc_flags = IB_WC_WITH_IMM;
2285 case OP(RDMA_READ_REQUEST): {
2286 struct rvt_ack_entry *e;
2290 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
2292 next = qp->r_head_ack_queue + 1;
2293 /* s_ack_queue is size HFI1_MAX_RDMA_ATOMIC+1 so use > not >= */
2294 if (next > HFI1_MAX_RDMA_ATOMIC)
2296 spin_lock_irqsave(&qp->s_lock, flags);
2297 if (unlikely(next == qp->s_tail_ack_queue)) {
2298 if (!qp->s_ack_queue[next].sent)
2299 goto nack_inv_unlck;
2300 update_ack_queue(qp, next);
2302 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2303 if (e->rdma_sge.mr) {
2304 rvt_put_mr(e->rdma_sge.mr);
2305 e->rdma_sge.mr = NULL;
2307 reth = &ohdr->u.rc.reth;
2308 len = be32_to_cpu(reth->length);
2310 u32 rkey = be32_to_cpu(reth->rkey);
2311 u64 vaddr = get_ib_reth_vaddr(reth);
2314 /* Check rkey & NAK */
2315 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
2316 rkey, IB_ACCESS_REMOTE_READ);
2318 goto nack_acc_unlck;
2320 * Update the next expected PSN. We add 1 later
2321 * below, so only add the remainder here.
2323 qp->r_psn += rvt_div_mtu(qp, len - 1);
2325 e->rdma_sge.mr = NULL;
2326 e->rdma_sge.vaddr = NULL;
2327 e->rdma_sge.length = 0;
2328 e->rdma_sge.sge_length = 0;
2333 e->lpsn = qp->r_psn;
2335 * We need to increment the MSN here instead of when we
2336 * finish sending the result since a duplicate request would
2337 * increment it more than once.
2341 qp->r_state = opcode;
2342 qp->r_nak_state = 0;
2343 qp->r_head_ack_queue = next;
2345 /* Schedule the send engine. */
2346 qp->s_flags |= RVT_S_RESP_PENDING;
2348 qp->s_flags |= RVT_S_ECN;
2349 hfi1_schedule_send(qp);
2351 spin_unlock_irqrestore(&qp->s_lock, flags);
2355 case OP(COMPARE_SWAP):
2356 case OP(FETCH_ADD): {
2357 struct ib_atomic_eth *ateth;
2358 struct rvt_ack_entry *e;
2365 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
2367 next = qp->r_head_ack_queue + 1;
2368 if (next > HFI1_MAX_RDMA_ATOMIC)
2370 spin_lock_irqsave(&qp->s_lock, flags);
2371 if (unlikely(next == qp->s_tail_ack_queue)) {
2372 if (!qp->s_ack_queue[next].sent)
2373 goto nack_inv_unlck;
2374 update_ack_queue(qp, next);
2376 e = &qp->s_ack_queue[qp->r_head_ack_queue];
2377 if (e->rdma_sge.mr) {
2378 rvt_put_mr(e->rdma_sge.mr);
2379 e->rdma_sge.mr = NULL;
2381 ateth = &ohdr->u.atomic_eth;
2382 vaddr = get_ib_ateth_vaddr(ateth);
2383 if (unlikely(vaddr & (sizeof(u64) - 1)))
2384 goto nack_inv_unlck;
2385 rkey = be32_to_cpu(ateth->rkey);
2386 /* Check rkey & NAK */
2387 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
2389 IB_ACCESS_REMOTE_ATOMIC)))
2390 goto nack_acc_unlck;
2391 /* Perform atomic OP and save result. */
2392 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
2393 sdata = get_ib_ateth_swap(ateth);
2394 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
2395 (u64)atomic64_add_return(sdata, maddr) - sdata :
2396 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
2397 get_ib_ateth_compare(ateth),
2399 rvt_put_mr(qp->r_sge.sge.mr);
2400 qp->r_sge.num_sge = 0;
2407 qp->r_state = opcode;
2408 qp->r_nak_state = 0;
2409 qp->r_head_ack_queue = next;
2411 /* Schedule the send engine. */
2412 qp->s_flags |= RVT_S_RESP_PENDING;
2414 qp->s_flags |= RVT_S_ECN;
2415 hfi1_schedule_send(qp);
2417 spin_unlock_irqrestore(&qp->s_lock, flags);
2422 /* NAK unknown opcodes. */
2426 qp->r_state = opcode;
2427 qp->r_ack_psn = psn;
2428 qp->r_nak_state = 0;
2429 /* Send an ACK if requested or required. */
2430 if (psn & IB_BTH_REQ_ACK || fecn) {
2431 if (packet->numpkt == 0 || fecn ||
2432 qp->r_adefered >= HFI1_PSN_CREDIT) {
2437 rc_defered_ack(rcd, qp);
2442 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
2443 qp->r_ack_psn = qp->r_psn;
2444 /* Queue RNR NAK for later */
2445 rc_defered_ack(rcd, qp);
2449 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2450 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
2451 qp->r_ack_psn = qp->r_psn;
2452 /* Queue NAK for later */
2453 rc_defered_ack(rcd, qp);
2457 spin_unlock_irqrestore(&qp->s_lock, flags);
2459 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
2460 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
2461 qp->r_ack_psn = qp->r_psn;
2462 /* Queue NAK for later */
2463 rc_defered_ack(rcd, qp);
2467 spin_unlock_irqrestore(&qp->s_lock, flags);
2469 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
2470 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
2471 qp->r_ack_psn = qp->r_psn;
2473 hfi1_send_rc_ack(packet, fecn);
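/*
 * hfi1_rc_hdrerr - handle a packet received with a header error on an RC QP
 *
 * For request packets (currently only RDMA writes are considered) with a PSN
 * at or beyond the expected one, record a PSN sequence error NAK and defer
 * it until the receive queue has been processed.
 */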
2476 void hfi1_rc_hdrerr(
2477 struct hfi1_ctxtdata *rcd,
2478 struct hfi1_packet *packet,
2481 struct hfi1_ibport *ibp = rcd_to_iport(rcd);
2486 if (hfi1_ruc_check_hdr(ibp, packet))
2489 psn = ib_bth_get_psn(packet->ohdr);
2490 opcode = ib_bth_get_opcode(packet->ohdr);
2492 /* Only deal with RDMA Writes for now */
2493 if (opcode < IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST) {
2494 diff = delta_psn(psn, qp->r_psn);
2495 if (!qp->r_nak_state && diff >= 0) {
2496 ibp->rvp.n_rc_seqnak++;
2497 qp->r_nak_state = IB_NAK_PSN_ERROR;
2498 /* Use the expected PSN. */
2499 qp->r_ack_psn = qp->r_psn;
2501 * Wait to send the sequence NAK until all packets
2502 * in the receive queue have been processed.
2505 * Otherwise, we end up propagating congestion.
2508 rc_defered_ack(rcd, qp);
2509 } /* Out of sequence NAK */
2510 } /* QP Request NAKs */