/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-msg.c
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/lnet/lib-lnet.h>
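
/*
 * Build an LNET_EVENT_UNLINK event describing @md in @ev. The event is
 * zeroed first so that no stale state leaks into fields that are not
 * explicitly set below.
 */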
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
        memset(ev, 0, sizeof(*ev));

        ev->status = 0;
        ev->unlinked = 1;
        ev->type = LNET_EVENT_UNLINK;
        lnet_md_deconstruct(md, &ev->md);
        lnet_md2handle(&ev->md_handle, md);
}
/*
 * No lock needed; must be called after lnet_commit_md()
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
        struct lnet_hdr *hdr = &msg->msg_hdr;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(!msg->msg_routing);

        ev->type = ev_type;

        if (ev_type == LNET_EVENT_SEND) {
                /* event for active message */
                ev->target.nid = le64_to_cpu(hdr->dest_nid);
                ev->target.pid = le32_to_cpu(hdr->dest_pid);
                ev->initiator.nid = LNET_NID_ANY;
                ev->initiator.pid = the_lnet.ln_pid;
                ev->sender = LNET_NID_ANY;
        } else {
                /* event for passive message */
                ev->target.pid = hdr->dest_pid;
                ev->target.nid = hdr->dest_nid;
                ev->initiator.pid = hdr->src_pid;
                ev->initiator.nid = hdr->src_nid;
                ev->rlength = hdr->payload_length;
                ev->sender = msg->msg_from;
                ev->mlength = msg->msg_wanted;
                ev->offset = msg->msg_offset;
        }

        switch (ev_type) {
        default:
                LBUG();

        case LNET_EVENT_PUT: /* passive PUT */
                ev->pt_index = hdr->msg.put.ptl_index;
                ev->match_bits = hdr->msg.put.match_bits;
                ev->hdr_data = hdr->msg.put.hdr_data;
                return;

        case LNET_EVENT_GET: /* passive GET */
                ev->pt_index = hdr->msg.get.ptl_index;
                ev->match_bits = hdr->msg.get.match_bits;
                ev->hdr_data = 0;
                return;

        case LNET_EVENT_ACK: /* ACK */
                ev->match_bits = hdr->msg.ack.match_bits;
                ev->mlength = hdr->msg.ack.mlength;
                return;

        case LNET_EVENT_REPLY: /* REPLY */
                return;

        case LNET_EVENT_SEND: /* active message */
                if (msg->msg_type == LNET_MSG_PUT) {
                        ev->pt_index = le32_to_cpu(hdr->msg.put.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
                        ev->offset = le32_to_cpu(hdr->msg.put.offset);
                        ev->mlength =
                        ev->rlength = le32_to_cpu(hdr->payload_length);
                        ev->hdr_data = le64_to_cpu(hdr->msg.put.hdr_data);
                } else {
                        LASSERT(msg->msg_type == LNET_MSG_GET);
                        ev->pt_index = le32_to_cpu(hdr->msg.get.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
                        ev->mlength =
                        ev->rlength = le32_to_cpu(hdr->msg.get.sink_length);
                        ev->offset = le32_to_cpu(hdr->msg.get.src_offset);
                        ev->hdr_data = 0;
                }
                return;
        }
}
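
/*
 * Commit @msg on the active list of the message container for CPT @cpt
 * and update that partition's message counters. A routed message may be
 * committed twice (once for receiving, once for sending), but it is only
 * ever put on the active list once.
 */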
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
        struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
        struct lnet_counters *counters = the_lnet.ln_counters[cpt];

        /* routed message can be committed for both receiving and sending */
        LASSERT(!msg->msg_tx_committed);

        if (msg->msg_sending) {
                LASSERT(!msg->msg_receiving);

                msg->msg_tx_cpt = cpt;
                msg->msg_tx_committed = 1;
                if (msg->msg_rx_committed) { /* routed message REPLY */
                        LASSERT(msg->msg_onactivelist);
                        return;
                }
        } else {
                LASSERT(!msg->msg_sending);
                msg->msg_rx_cpt = cpt;
                msg->msg_rx_committed = 1;
        }

        LASSERT(!msg->msg_onactivelist);
        msg->msg_onactivelist = 1;
        list_add(&msg->msg_activelist, &container->msc_active);

        counters->msgs_alloc++;
        if (counters->msgs_alloc > counters->msgs_max)
                counters->msgs_max = counters->msgs_alloc;
}
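
/*
 * Undo the send-side commit of @msg: update the send/route counters
 * (only if the message succeeded), return its TX credits and clear
 * msg_tx_committed. Message types that were overwritten in place while
 * sending the ACK or REPLY are restored to their original value here.
 */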
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(msg->msg_tx_committed);
        if (status)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_tx_cpt];
        switch (ev->type) {
        default: /* routed message */
                LASSERT(msg->msg_routing);
                LASSERT(msg->msg_rx_committed);
                LASSERT(!ev->type);

                counters->route_length += msg->msg_len;
                counters->route_count++;
                goto out;

        case LNET_EVENT_PUT:
                /* should have been decommitted */
                LASSERT(!msg->msg_rx_committed);
                /* overwritten while sending ACK */
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                msg->msg_type = LNET_MSG_PUT; /* fix type */
                break;

        case LNET_EVENT_SEND:
                LASSERT(!msg->msg_rx_committed);
                if (msg->msg_type == LNET_MSG_PUT)
                        counters->send_length += msg->msg_len;
                break;

        case LNET_EVENT_GET:
                LASSERT(msg->msg_rx_committed);
                /*
                 * overwritten while sending the reply; we should never
                 * get here for an optimized GET
                 */
                LASSERT(msg->msg_type == LNET_MSG_REPLY);
                msg->msg_type = LNET_MSG_GET; /* fix type */
                break;
        }

        counters->send_count++;
out:
        lnet_return_tx_credits_locked(msg);
        msg->msg_tx_committed = 0;
}
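
/*
 * Undo the receive-side commit of @msg: update the receive counters
 * (only if the message succeeded), return its RX credits and clear
 * msg_rx_committed. Note the optimized-GET cases below, where the
 * message type does not necessarily match the event type.
 */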
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
        LASSERT(msg->msg_rx_committed);

        if (status)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_rx_cpt];
        switch (ev->type) {
        default:
                LASSERT(!ev->type);
                LASSERT(msg->msg_routing);
                goto out;

        case LNET_EVENT_ACK:
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                break;

        case LNET_EVENT_GET:
                /*
                 * type is "REPLY" if it's an optimized GET on the passive
                 * side: an optimized GET is never committed for sending,
                 * so the message type is not changed back to "GET" by
                 * lnet_msg_decommit_tx(); see lnet_parse_get() for details
                 */
                LASSERT(msg->msg_type == LNET_MSG_REPLY ||
                        msg->msg_type == LNET_MSG_GET);
                counters->send_length += msg->msg_wanted;
                break;

        case LNET_EVENT_PUT:
                LASSERT(msg->msg_type == LNET_MSG_PUT);
                break;

        case LNET_EVENT_REPLY:
                /*
                 * type is "GET" if it's an optimized GET on the active
                 * side; see lnet_create_reply_msg() for details
                 */
                LASSERT(msg->msg_type == LNET_MSG_GET ||
                        msg->msg_type == LNET_MSG_REPLY);
                break;
        }

        counters->recv_count++;
        if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
                counters->recv_length += msg->msg_wanted;

out:
        lnet_return_rx_credits_locked(msg);
        msg->msg_rx_committed = 0;
}
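
/*
 * Fully decommit @msg and take it off the active list. Called with
 * lnet_net_lock(cpt) held; if the receive CPT differs from @cpt, the
 * lock is switched temporarily and @cpt is reacquired before returning.
 */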
void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
        int cpt2 = cpt;

        LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
        LASSERT(msg->msg_onactivelist);

        if (msg->msg_tx_committed) { /* always decommit for sending first */
                LASSERT(cpt == msg->msg_tx_cpt);
                lnet_msg_decommit_tx(msg, status);
        }

        if (msg->msg_rx_committed) {
                /* forwarding msg committed for both receiving and sending */
                if (cpt != msg->msg_rx_cpt) {
                        lnet_net_unlock(cpt);
                        cpt2 = msg->msg_rx_cpt;
                        lnet_net_lock(cpt2);
                }
                lnet_msg_decommit_rx(msg, status);
        }

        list_del(&msg->msg_activelist);
        msg->msg_onactivelist = 0;

        the_lnet.ln_counters[cpt2]->msgs_alloc--;

        if (cpt2 != cpt) {
                lnet_net_unlock(cpt2);
                lnet_net_lock(cpt);
        }
}
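
/*
 * Attach @md to @msg for the duration of this operation; the MD's
 * refcount and threshold accounting below are reversed by
 * lnet_msg_detach_md().
 */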
void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
                   unsigned int offset, unsigned int mlen)
{
        /* NB: @offset and @mlen are only used when receiving */
        /*
         * Here, we attach the MD on lnet_msg and mark it busy,
         * decrementing its threshold. Come what may, the lnet_msg "owns"
         * the MD until a call to lnet_msg_detach_md() or lnet_finalize()
         * signals completion.
         */
        LASSERT(!msg->msg_routing);

        msg->msg_md = md;
        if (msg->msg_receiving) { /* committed for receiving */
                msg->msg_offset = offset;
                msg->msg_wanted = mlen;
        }

        md->md_refcount++;
        if (md->md_threshold != LNET_MD_THRESH_INF) {
                LASSERT(md->md_threshold > 0);
                md->md_threshold--;
        }

        /* build umd in event */
        lnet_md2handle(&msg->msg_ev.md_handle, md);
        lnet_md_deconstruct(md, &msg->msg_ev.md);
}
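
/*
 * Detach the MD from @msg: deliver the event to the MD's event queue
 * (if it has one) and unlink the MD once it has become unlinkable.
 */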
void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
        struct lnet_libmd *md = msg->msg_md;
        int unlink;

        /* Now it's safe to drop my caller's ref */
        md->md_refcount--;
        LASSERT(md->md_refcount >= 0);

        unlink = lnet_md_unlinkable(md);
        if (md->md_eq) {
                msg->msg_ev.status = status;
                msg->msg_ev.unlinked = unlink;
                lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
        }

        if (unlink)
                lnet_md_unlink(md);

        msg->msg_md = NULL;
}
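
/*
 * Complete one message: send the ACK for a successfully received PUT,
 * or forward a routed message that hasn't been sent yet; otherwise
 * decommit and free it. Called with lnet_net_lock(cpt) held; a non-zero
 * return tells lnet_finalize() to restart on the correct partition.
 */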
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
        struct lnet_handle_wire ack_wmd;
        int rc;
        int status = msg->msg_ev.status;

        LASSERT(msg->msg_onactivelist);

        if (!status && msg->msg_ack) {
                /* Only send an ACK if the PUT completed successfully */
                lnet_msg_decommit(msg, cpt, 0);

                msg->msg_ack = 0;
                lnet_net_unlock(cpt);

                LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
                LASSERT(!msg->msg_routing);

                ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

                lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0);

                msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
                msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
                msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

                /*
                 * NB: we probably want to use the NID of msg::msg_from as
                 * the 3rd parameter (router NID) if it's a routed message
                 */
                rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * return because the LND will finalize it later.
                 *
                 * There is also the possibility that the message was
                 * committed for sending but failed before reaching the LND
                 * (e.g. ENOMEM); we can't fall through in that case either,
                 * because the CPT for sending can differ from the CPT for
                 * receiving, so we must return to lnet_finalize() to make
                 * sure we are locking the correct partition.
                 */
                return rc;

        } else if (!status &&   /* OK so far */
                   (msg->msg_routing && !msg->msg_sending)) {
                /* not forwarded */
                LASSERT(!msg->msg_receiving);   /* called back recv already */
                lnet_net_unlock(cpt);

                rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * return because the LND will finalize it later.
                 *
                 * There is also the possibility that the message was
                 * committed for sending but failed before reaching the LND
                 * (e.g. ENOMEM); we can't fall through in that case either:
                 * - the rule is that a message must be decommitted for
                 *   sending first if it's committed for both sending and
                 *   receiving
                 * - the CPT for sending can differ from the CPT for
                 *   receiving, so we must return to lnet_finalize() to
                 *   make sure we are locking the correct partition.
                 */
                return rc;
        }

        lnet_msg_decommit(msg, cpt, status);
        lnet_msg_free(msg);
        return 0;
}
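
/*
 * lnet_finalize() is the common completion path for messages, used by
 * LNet itself and (via EXPORT_SYMBOL) by the LNDs: it delivers the event
 * by detaching the MD, then drains the container's finalizing queue.
 * The msc_finalizers[] slots bound the number of threads finalizing
 * concurrently on a CPT and act as the recursion breaker below.
 */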
void
lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int status)
{
        struct lnet_msg_container *container;
        int my_slot;
        int cpt;
        int rc;
        int i;

        LASSERT(!in_interrupt());

        if (!msg)
                return;

        msg->msg_ev.status = status;

        if (msg->msg_md) {
                cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

                lnet_res_lock(cpt);
                lnet_msg_detach_md(msg, status);
                lnet_res_unlock(cpt);
        }

again:
        rc = 0;
        if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
                /* not committed to network yet */
                LASSERT(!msg->msg_onactivelist);
                lnet_msg_free(msg);
                return;
        }

        /*
         * NB: a routed message can be committed for both receiving and
         * sending; finalize in LIFO order to keep the counters correct
         * (finalize sending first, then receiving)
         */
        cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
        lnet_net_lock(cpt);

        container = the_lnet.ln_msg_containers[cpt];
        list_add_tail(&msg->msg_list, &container->msc_finalizing);

        /*
         * Recursion breaker. Don't complete the message here if I am (or
         * enough other threads are) already completing messages
         */
        my_slot = -1;
        for (i = 0; i < container->msc_nfinalizers; i++) {
                if (container->msc_finalizers[i] == current)
                        break;

                if (my_slot < 0 && !container->msc_finalizers[i])
                        my_slot = i;
        }

        if (i < container->msc_nfinalizers || my_slot < 0) {
                lnet_net_unlock(cpt);
                return;
        }

        container->msc_finalizers[my_slot] = current;

        while (!list_empty(&container->msc_finalizing)) {
                msg = list_entry(container->msc_finalizing.next,
                                 struct lnet_msg, msg_list);

                list_del(&msg->msg_list);

                /*
                 * NB drops and regains the lnet lock if it actually does
                 * anything, so my finalizing friends can chomp along too
                 */
                rc = lnet_complete_msg_locked(msg, cpt);
                if (rc)
                        break;
        }

        if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
                lnet_net_unlock(cpt);
                lnet_delay_rule_check();
                lnet_net_lock(cpt);
        }

        container->msc_finalizers[my_slot] = NULL;
        lnet_net_unlock(cpt);

        if (rc)
                goto again;
}
EXPORT_SYMBOL(lnet_finalize);
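
/*
 * Release everything still attached to @container. Messages left on the
 * active list at shutdown were never finalized; they are freed here and
 * their count reported via CERROR.
 */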
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
        int count = 0;

        if (!container->msc_init)
                return;

        while (!list_empty(&container->msc_active)) {
                struct lnet_msg *msg;

                msg = list_entry(container->msc_active.next,
                                 struct lnet_msg, msg_activelist);
                LASSERT(msg->msg_onactivelist);
                msg->msg_onactivelist = 0;
                list_del(&msg->msg_activelist);
                lnet_msg_free(msg);
                count++;
        }

        if (count > 0)
                CERROR("%d active msg on exit\n", count);

        if (container->msc_finalizers) {
                LIBCFS_FREE(container->msc_finalizers,
                            container->msc_nfinalizers *
                            sizeof(*container->msc_finalizers));
                container->msc_finalizers = NULL;
        }
        container->msc_init = 0;
}
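
/*
 * Initialize @container for CPT @cpt. One finalizer slot is allocated
 * per CPU in the partition, capping the number of threads that can
 * finalize messages on this CPT at the same time.
 */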
int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
        container->msc_init = 1;

        INIT_LIST_HEAD(&container->msc_active);
        INIT_LIST_HEAD(&container->msc_finalizing);

        /* number of CPUs */
        container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);

        LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
                         container->msc_nfinalizers *
                         sizeof(*container->msc_finalizers));
        if (!container->msc_finalizers) {
                CERROR("Failed to allocate message finalizers\n");
                lnet_msg_container_cleanup(container);
                return -ENOMEM;
        }

        return 0;
}
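
/*
 * Tear down the per-CPT message containers, if they were ever created.
 */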
void
lnet_msg_containers_destroy(void)
{
        struct lnet_msg_container *container;
        int i;

        if (!the_lnet.ln_msg_containers)
                return;

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
                lnet_msg_container_cleanup(container);

        cfs_percpt_free(the_lnet.ln_msg_containers);
        the_lnet.ln_msg_containers = NULL;
}
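
/*
 * Allocate and set up one message container per CPT; on any setup
 * failure, everything allocated so far is destroyed.
 */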
int
lnet_msg_containers_create(void)
{
        struct lnet_msg_container *container;
        int rc;
        int i;

        the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
                                                      sizeof(*container));
        if (!the_lnet.ln_msg_containers) {
                CERROR("Failed to allocate cpu-partition data for network\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
                rc = lnet_msg_container_setup(container, i);
                if (rc) {
                        lnet_msg_containers_destroy();
                        return rc;
                }
        }

        return 0;
}