/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-msg.c
 *
 * Message decoding, parsing and finalizing routines
 */

#define DEBUG_SUBSYSTEM S_LNET

#include <linux/lnet/lib-lnet.h>

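/*
 * Build an LNET_EVENT_UNLINK event for @md.  Used, for example, by
 * LNetMDUnlink() to deliver the unlink event when no in-flight
 * operation will report it.
 */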
void
lnet_build_unlink_event(struct lnet_libmd *md, struct lnet_event *ev)
{
        memset(ev, 0, sizeof(*ev));

        ev->status   = 0;
        ev->unlinked = 1;
        ev->type     = LNET_EVENT_UNLINK;
        lnet_md_deconstruct(md, &ev->md);
        lnet_md2handle(&ev->md_handle, md);
}

/*
 * Don't need any lock, must be called after lnet_commit_md
 */
void
lnet_build_msg_event(struct lnet_msg *msg, enum lnet_event_kind ev_type)
{
        struct lnet_hdr *hdr = &msg->msg_hdr;
        struct lnet_event *ev  = &msg->msg_ev;

        LASSERT(!msg->msg_routing);

        ev->type = ev_type;

        if (ev_type == LNET_EVENT_SEND) {
                /* event for active message */
                ev->target.nid    = le64_to_cpu(hdr->dest_nid);
                ev->target.pid    = le32_to_cpu(hdr->dest_pid);
                ev->initiator.nid = LNET_NID_ANY;
                ev->initiator.pid = the_lnet.ln_pid;
                ev->sender        = LNET_NID_ANY;
        } else {
                /* event for passive message */
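                /*
                 * NB: the header of a passive (incoming) message has
                 * already been converted to host byte order by
                 * lnet_parse(), so no le*_to_cpu() is needed here.
                 */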
                ev->target.pid    = hdr->dest_pid;
                ev->target.nid    = hdr->dest_nid;
                ev->initiator.pid = hdr->src_pid;
                ev->initiator.nid = hdr->src_nid;
                ev->rlength       = hdr->payload_length;
                ev->sender        = msg->msg_from;
                ev->mlength       = msg->msg_wanted;
                ev->offset        = msg->msg_offset;
        }

        switch (ev_type) {
        default:
                LBUG();

        case LNET_EVENT_PUT: /* passive PUT */
                ev->pt_index   = hdr->msg.put.ptl_index;
                ev->match_bits = hdr->msg.put.match_bits;
                ev->hdr_data   = hdr->msg.put.hdr_data;
                return;

        case LNET_EVENT_GET: /* passive GET */
                ev->pt_index   = hdr->msg.get.ptl_index;
                ev->match_bits = hdr->msg.get.match_bits;
                ev->hdr_data   = 0;
                return;

        case LNET_EVENT_ACK: /* ACK */
                ev->match_bits = hdr->msg.ack.match_bits;
                ev->mlength    = hdr->msg.ack.mlength;
                return;

        case LNET_EVENT_REPLY: /* REPLY */
                return;

        case LNET_EVENT_SEND: /* active message */
                if (msg->msg_type == LNET_MSG_PUT) {
                        ev->pt_index   = le32_to_cpu(hdr->msg.put.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.put.match_bits);
                        ev->offset     = le32_to_cpu(hdr->msg.put.offset);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->payload_length);
                        ev->hdr_data   = le64_to_cpu(hdr->msg.put.hdr_data);

                } else {
                        LASSERT(msg->msg_type == LNET_MSG_GET);
                        ev->pt_index   = le32_to_cpu(hdr->msg.get.ptl_index);
                        ev->match_bits = le64_to_cpu(hdr->msg.get.match_bits);
                        ev->mlength    =
                        ev->rlength    = le32_to_cpu(hdr->msg.get.sink_length);
                        ev->offset     = le32_to_cpu(hdr->msg.get.src_offset);
                        ev->hdr_data   = 0;
                }
                return;
        }
}

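/*
 * Commit @msg to the message container of CPT @cpt: add it to the
 * active list and update the per-CPT counters.  A routed message can
 * be committed twice, first for receiving and later for sending.
 */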
void
lnet_msg_commit(struct lnet_msg *msg, int cpt)
{
        struct lnet_msg_container *container = the_lnet.ln_msg_containers[cpt];
        struct lnet_counters *counters  = the_lnet.ln_counters[cpt];

        /* routed message can be committed for both receiving and sending */
        LASSERT(!msg->msg_tx_committed);

        if (msg->msg_sending) {
                LASSERT(!msg->msg_receiving);

                msg->msg_tx_cpt = cpt;
                msg->msg_tx_committed = 1;
                if (msg->msg_rx_committed) { /* routed message REPLY */
                        LASSERT(msg->msg_onactivelist);
                        return;
                }
        } else {
                LASSERT(!msg->msg_sending);
                msg->msg_rx_cpt = cpt;
                msg->msg_rx_committed = 1;
        }

        LASSERT(!msg->msg_onactivelist);
        msg->msg_onactivelist = 1;
        list_add(&msg->msg_activelist, &container->msc_active);

        counters->msgs_alloc++;
        if (counters->msgs_alloc > counters->msgs_max)
                counters->msgs_max = counters->msgs_alloc;
}

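/*
 * Undo the send-side commit: on success update the send counters,
 * then return the transmit credits held by this message.
 */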
static void
lnet_msg_decommit_tx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(msg->msg_tx_committed);
        if (status)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_tx_cpt];
        switch (ev->type) {
        default: /* routed message */
                LASSERT(msg->msg_routing);
                LASSERT(msg->msg_rx_committed);
                LASSERT(!ev->type);

                counters->route_length += msg->msg_len;
                counters->route_count++;
                goto out;

        case LNET_EVENT_PUT:
                /* should have been decommitted */
                LASSERT(!msg->msg_rx_committed);
                /* overwritten while sending ACK */
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                msg->msg_type = LNET_MSG_PUT; /* fix type */
                break;

        case LNET_EVENT_SEND:
                LASSERT(!msg->msg_rx_committed);
                if (msg->msg_type == LNET_MSG_PUT)
                        counters->send_length += msg->msg_len;
                break;

        case LNET_EVENT_GET:
                LASSERT(msg->msg_rx_committed);
                /*
                 * overwritten while sending reply, we should never be
                 * here for optimized GET
                 */
                LASSERT(msg->msg_type == LNET_MSG_REPLY);
                msg->msg_type = LNET_MSG_GET; /* fix type */
                break;
        }

        counters->send_count++;
 out:
        lnet_return_tx_credits_locked(msg);
        msg->msg_tx_committed = 0;
}

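/*
 * Undo the receive-side commit: on success update the receive
 * counters, then return the receive credits held by this message.
 */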
static void
lnet_msg_decommit_rx(struct lnet_msg *msg, int status)
{
        struct lnet_counters *counters;
        struct lnet_event *ev = &msg->msg_ev;

        LASSERT(!msg->msg_tx_committed); /* decommitted or never committed */
        LASSERT(msg->msg_rx_committed);

        if (status)
                goto out;

        counters = the_lnet.ln_counters[msg->msg_rx_cpt];
        switch (ev->type) {
        default:
                LASSERT(!ev->type);
                LASSERT(msg->msg_routing);
                goto out;

        case LNET_EVENT_ACK:
                LASSERT(msg->msg_type == LNET_MSG_ACK);
                break;

        case LNET_EVENT_GET:
                /*
                 * The type is "REPLY" for an optimized GET on the passive
                 * side: an optimized GET is never committed for sending, so
                 * the message type is not changed back to "GET" by
                 * lnet_msg_decommit_tx(); see lnet_parse_get() for details.
                 */
                LASSERT(msg->msg_type == LNET_MSG_REPLY ||
                        msg->msg_type == LNET_MSG_GET);
                counters->send_length += msg->msg_wanted;
                break;

        case LNET_EVENT_PUT:
                LASSERT(msg->msg_type == LNET_MSG_PUT);
                break;

        case LNET_EVENT_REPLY:
                /*
                 * type is "GET" if it's an optimized GET on active side,
                 * see details in lnet_create_reply_msg()
                 */
                LASSERT(msg->msg_type == LNET_MSG_GET ||
                        msg->msg_type == LNET_MSG_REPLY);
                break;
        }

        counters->recv_count++;
        if (ev->type == LNET_EVENT_PUT || ev->type == LNET_EVENT_REPLY)
                counters->recv_length += msg->msg_wanted;

 out:
        lnet_return_rx_credits_locked(msg);
        msg->msg_rx_committed = 0;
}

void
lnet_msg_decommit(struct lnet_msg *msg, int cpt, int status)
{
        int cpt2 = cpt;

        LASSERT(msg->msg_tx_committed || msg->msg_rx_committed);
        LASSERT(msg->msg_onactivelist);

        if (msg->msg_tx_committed) { /* always decommit for sending first */
                LASSERT(cpt == msg->msg_tx_cpt);
                lnet_msg_decommit_tx(msg, status);
        }

        if (msg->msg_rx_committed) {
                /* forwarding msg committed for both receiving and sending */
                if (cpt != msg->msg_rx_cpt) {
                        lnet_net_unlock(cpt);
                        cpt2 = msg->msg_rx_cpt;
                        lnet_net_lock(cpt2);
                }
                lnet_msg_decommit_rx(msg, status);
        }

        list_del(&msg->msg_activelist);
        msg->msg_onactivelist = 0;

        the_lnet.ln_counters[cpt2]->msgs_alloc--;

        if (cpt2 != cpt) {
                lnet_net_unlock(cpt2);
                lnet_net_lock(cpt);
        }
}

void
lnet_msg_attach_md(struct lnet_msg *msg, struct lnet_libmd *md,
                   unsigned int offset, unsigned int mlen)
{
        /* NB: @offset and @mlen are only used when receiving */
        /*
         * Attach the MD to the lnet_msg, mark it busy, and decrement
         * its threshold.  Come what may, the lnet_msg "owns" the MD
         * until a call to lnet_msg_detach_md or lnet_finalize()
         * signals completion.
         */
        LASSERT(!msg->msg_routing);

        msg->msg_md = md;
        if (msg->msg_receiving) { /* committed for receiving */
                msg->msg_offset = offset;
                msg->msg_wanted = mlen;
        }

        md->md_refcount++;
        if (md->md_threshold != LNET_MD_THRESH_INF) {
                LASSERT(md->md_threshold > 0);
                md->md_threshold--;
        }

        /* build umd in event */
        lnet_md2handle(&msg->msg_ev.md_handle, md);
        lnet_md_deconstruct(md, &msg->msg_ev.md);
}

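/*
 * Drop the message's reference on its MD, deliver the completion
 * event if the MD has an event queue attached, and unlink the MD if
 * this was the last reference and it became unlinkable.
 */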
void
lnet_msg_detach_md(struct lnet_msg *msg, int status)
{
        struct lnet_libmd *md = msg->msg_md;
        int unlink;

        /* Now it's safe to drop my caller's ref */
        md->md_refcount--;
        LASSERT(md->md_refcount >= 0);

        unlink = lnet_md_unlinkable(md);
        if (md->md_eq) {
                msg->msg_ev.status   = status;
                msg->msg_ev.unlinked = unlink;
                lnet_eq_enqueue_event(md->md_eq, &msg->msg_ev);
        }

        if (unlink)
                lnet_md_unlink(md);

        msg->msg_md = NULL;
}

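/*
 * Complete @msg under the lnet_net_lock for @cpt: send a pending ACK
 * or forward a not-yet-sent routed message if needed, otherwise
 * decommit and free it.  A non-zero return means the send failed and
 * the caller (lnet_finalize()) must retry under the correct CPT lock.
 */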
static int
lnet_complete_msg_locked(struct lnet_msg *msg, int cpt)
{
        struct lnet_handle_wire ack_wmd;
        int rc;
        int status = msg->msg_ev.status;

        LASSERT(msg->msg_onactivelist);

        if (!status && msg->msg_ack) {
                /* Only send an ACK if the PUT completed successfully */

                lnet_msg_decommit(msg, cpt, 0);

                msg->msg_ack = 0;
                lnet_net_unlock(cpt);

                LASSERT(msg->msg_ev.type == LNET_EVENT_PUT);
                LASSERT(!msg->msg_routing);

                ack_wmd = msg->msg_hdr.msg.put.ack_wmd;

                lnet_prep_send(msg, LNET_MSG_ACK, msg->msg_ev.initiator, 0, 0);

                msg->msg_hdr.msg.ack.dst_wmd = ack_wmd;
                msg->msg_hdr.msg.ack.match_bits = msg->msg_ev.match_bits;
                msg->msg_hdr.msg.ack.mlength = cpu_to_le32(msg->msg_ev.mlength);

                /*
                 * NB: we probably want to use the NID of msg::msg_from as the
                 * 3rd parameter (router NID) if it's a routed message
                 */
                rc = lnet_send(msg->msg_ev.target.nid, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * must return because the LND will finalize it later.
                 *
                 * It is also possible that the message was committed for
                 * sending but failed before reaching the LND (e.g. ENOMEM);
                 * we can't fall through in that case either, because the
                 * CPT for sending can differ from the CPT for receiving,
                 * so we return to lnet_finalize() to make sure we lock
                 * the correct partition.
                 */
                return rc;

        } else if (!status &&   /* OK so far */
                   (msg->msg_routing && !msg->msg_sending)) {
                /* not forwarded */
                LASSERT(!msg->msg_receiving);   /* called back recv already */
                lnet_net_unlock(cpt);

                rc = lnet_send(LNET_NID_ANY, msg, LNET_NID_ANY);

                lnet_net_lock(cpt);
                /*
                 * NB: the message is committed for sending; on success we
                 * must return because the LND will finalize it later.
                 *
                 * It is also possible that the message was committed for
                 * sending but failed before reaching the LND (e.g. ENOMEM);
                 * we can't fall through in that case either:
                 * - The rule is that a message must be decommitted for
                 *   sending first if it's committed for both sending and
                 *   receiving.
                 * - The CPT for sending can differ from the CPT for
                 *   receiving, so we return to lnet_finalize() to make
                 *   sure we lock the correct partition.
                 */
                return rc;
        }

        lnet_msg_decommit(msg, cpt, status);
        lnet_msg_free(msg);
        return 0;
}

void
lnet_finalize(struct lnet_ni *ni, struct lnet_msg *msg, int status)
{
        struct lnet_msg_container *container;
        int my_slot;
        int cpt;
        int rc;
        int i;

        LASSERT(!in_interrupt());

        if (!msg)
                return;

        msg->msg_ev.status = status;

        if (msg->msg_md) {
                cpt = lnet_cpt_of_cookie(msg->msg_md->md_lh.lh_cookie);

                lnet_res_lock(cpt);
                lnet_msg_detach_md(msg, status);
                lnet_res_unlock(cpt);
        }

 again:
        rc = 0;
        if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
                /* not committed to network yet */
                LASSERT(!msg->msg_onactivelist);
                lnet_msg_free(msg);
                return;
        }

        /*
         * NB: a routed message can be committed for both receiving and
         * sending; we must finalize in LIFO order to keep the counters
         * correct (finalize sending first, then finalize receiving).
         */
        cpt = msg->msg_tx_committed ? msg->msg_tx_cpt : msg->msg_rx_cpt;
        lnet_net_lock(cpt);

        container = the_lnet.ln_msg_containers[cpt];
        list_add_tail(&msg->msg_list, &container->msc_finalizing);

        /*
         * Recursion breaker.  Don't complete the message here if I am (or
         * enough other threads are) already completing messages.
         */
        my_slot = -1;
        for (i = 0; i < container->msc_nfinalizers; i++) {
                if (container->msc_finalizers[i] == current)
                        break;

                if (my_slot < 0 && !container->msc_finalizers[i])
                        my_slot = i;
        }

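        /*
         * Either this thread is already finalizing (i < msc_nfinalizers)
         * or every finalizer slot is busy (my_slot < 0): leave the
         * message on msc_finalizing for an active finalizer to complete.
         */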
        if (i < container->msc_nfinalizers || my_slot < 0) {
                lnet_net_unlock(cpt);
                return;
        }

        container->msc_finalizers[my_slot] = current;

        while (!list_empty(&container->msc_finalizing)) {
                msg = list_entry(container->msc_finalizing.next,
                                 struct lnet_msg, msg_list);

                list_del(&msg->msg_list);

                /*
                 * NB drops and regains the lnet lock if it actually does
                 * anything, so my finalizing friends can chomp along too
                 */
                rc = lnet_complete_msg_locked(msg, cpt);
                if (rc)
                        break;
        }

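        /* give the fault-injection delay rules a chance to release messages */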
        if (unlikely(!list_empty(&the_lnet.ln_delay_rules))) {
                lnet_net_unlock(cpt);
                lnet_delay_rule_check();
                lnet_net_lock(cpt);
        }

        container->msc_finalizers[my_slot] = NULL;
        lnet_net_unlock(cpt);

        if (rc)
                goto again;
}
EXPORT_SYMBOL(lnet_finalize);

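/*
 * Tear down a message container: free any messages still on the
 * active list (complaining if there are any) and release the
 * finalizer array.
 */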
void
lnet_msg_container_cleanup(struct lnet_msg_container *container)
{
        int count = 0;

        if (!container->msc_init)
                return;

        while (!list_empty(&container->msc_active)) {
                struct lnet_msg *msg;

                msg = list_entry(container->msc_active.next,
                                 struct lnet_msg, msg_activelist);
                LASSERT(msg->msg_onactivelist);
                msg->msg_onactivelist = 0;
                list_del(&msg->msg_activelist);
                lnet_msg_free(msg);
                count++;
        }

        if (count > 0)
                CERROR("%d active msg on exit\n", count);

        if (container->msc_finalizers) {
                LIBCFS_FREE(container->msc_finalizers,
                            container->msc_nfinalizers *
                            sizeof(*container->msc_finalizers));
                container->msc_finalizers = NULL;
        }
        container->msc_init = 0;
}

int
lnet_msg_container_setup(struct lnet_msg_container *container, int cpt)
{
        container->msc_init = 1;

        INIT_LIST_HEAD(&container->msc_active);
        INIT_LIST_HEAD(&container->msc_finalizing);

        /* one finalizer slot per CPU in this CPT */
        container->msc_nfinalizers = cfs_cpt_weight(lnet_cpt_table(), cpt);

        LIBCFS_CPT_ALLOC(container->msc_finalizers, lnet_cpt_table(), cpt,
                         container->msc_nfinalizers *
                         sizeof(*container->msc_finalizers));

        if (!container->msc_finalizers) {
                CERROR("Failed to allocate message finalizers\n");
                lnet_msg_container_cleanup(container);
                return -ENOMEM;
        }

        return 0;
}

void
lnet_msg_containers_destroy(void)
{
        struct lnet_msg_container *container;
        int i;

        if (!the_lnet.ln_msg_containers)
                return;

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers)
                lnet_msg_container_cleanup(container);

        cfs_percpt_free(the_lnet.ln_msg_containers);
        the_lnet.ln_msg_containers = NULL;
}

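/*
 * Allocate and set up one message container per CPT; on any failure
 * all containers are destroyed again.
 */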
int
lnet_msg_containers_create(void)
{
        struct lnet_msg_container *container;
        int rc;
        int i;

        the_lnet.ln_msg_containers = cfs_percpt_alloc(lnet_cpt_table(),
                                                      sizeof(*container));

        if (!the_lnet.ln_msg_containers) {
                CERROR("Failed to allocate cpu-partition data for network\n");
                return -ENOMEM;
        }

        cfs_percpt_for_each(container, i, the_lnet.ln_msg_containers) {
                rc = lnet_msg_container_setup(container, i);
                if (rc) {
                        lnet_msg_containers_destroy();
                        return rc;
                }
        }

        return 0;
}