/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */
/**
 * This file contains Asynchronous System Trap (AST) handlers and related
 * LDLM request-processing routines.
 *
 * An AST is a callback issued on a lock when its state is changed. There are
 * several different types of ASTs (callbacks) registered for each lock:
 *
 * - completion AST: when a lock is enqueued by some process, but cannot be
 *   granted immediately due to other conflicting locks on the same resource,
 *   the completion AST is sent to notify the caller when the lock is
 *   eventually granted
 *
 * - blocking AST: when a lock is granted to some process, if another process
 *   enqueues a conflicting (blocking) lock on a resource, a blocking AST is
 *   sent to notify the holder(s) of the lock(s) of the conflicting lock
 *   request. The lock holder(s) must release their lock(s) on that resource in
 *   a timely manner or be evicted by the server.
 *
 * - glimpse AST: this is used when a process wants information about a lock
 *   (i.e. the lock value block (LVB)) but does not necessarily require holding
 *   the lock. If the resource is locked, the lock holder(s) are sent glimpse
 *   ASTs and the LVB is returned to the caller, and lock holder(s) may CANCEL
 *   their lock(s) if they are idle. If the resource is not locked, the server
 *   may grant the lock.
 */
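
/*
 * Illustrative sketch (not part of the original file): a client typically
 * wires these ASTs up through a struct ldlm_enqueue_info before enqueueing;
 * only the ei_cb_* fields and ldlm_completion_ast() below come from this
 * code, the my_* handlers are hypothetical.
 *
 *	struct ldlm_enqueue_info einfo = {
 *		.ei_type = LDLM_IBITS,
 *		.ei_mode = LCK_PR,
 *		.ei_cb_bl = my_blocking_ast,	 (hypothetical)
 *		.ei_cb_cp = ldlm_completion_ast,
 *		.ei_cb_gl = my_glimpse_ast,	 (hypothetical)
 *	};
 */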

#define DEBUG_SUBSYSTEM S_LDLM

#include "../include/lustre_dlm.h"
#include "../include/obd_class.h"
#include "../include/obd.h"

#include "ldlm_internal.h"

unsigned int ldlm_enqueue_min = OBD_TIMEOUT_DEFAULT;
module_param(ldlm_enqueue_min, uint, 0644);
MODULE_PARM_DESC(ldlm_enqueue_min, "lock enqueue timeout minimum");

/* On the client side, whether cached locks are canceled before replay */
unsigned int ldlm_cancel_unused_locks_before_replay = 1;

static void interrupted_completion_wait(void *data)
{
}

struct lock_wait_data {
        struct ldlm_lock *lwd_lock;
        __u32        lwd_conn_cnt;
};

struct ldlm_async_args {
        struct lustre_handle lock_handle;
};

static int ldlm_expired_completion_wait(void *data)
{
        struct lock_wait_data *lwd = data;
        struct ldlm_lock *lock = lwd->lwd_lock;
        struct obd_import *imp;
        struct obd_device *obd;

        if (!lock->l_conn_export) {
                static unsigned long next_dump, last_dump;

                LCONSOLE_WARN("lock timed out (enqueued at %lld, %llds ago)\n",
                              (s64)lock->l_last_activity,
                              (s64)(ktime_get_real_seconds() -
                                    lock->l_last_activity));
                LDLM_DEBUG(lock, "lock timed out (enqueued at %lld, %llds ago); not entering recovery in server code, just going back to sleep",
                           (s64)lock->l_last_activity,
                           (s64)(ktime_get_real_seconds() -
                                 lock->l_last_activity));
                if (cfs_time_after(cfs_time_current(), next_dump)) {
                        last_dump = next_dump;
                        next_dump = cfs_time_shift(300);
                        ldlm_namespace_dump(D_DLMTRACE,
                                            ldlm_lock_to_ns(lock));
                        if (last_dump == 0)
                                libcfs_debug_dumplog();
                }
                return 0;
        }

        obd = lock->l_conn_export->exp_obd;
        imp = obd->u.cli.cl_import;
        ptlrpc_fail_import(imp, lwd->lwd_conn_cnt);
        LDLM_ERROR(lock, "lock timed out (enqueued at %lld, %llds ago), entering recovery for %s@%s",
                   (s64)lock->l_last_activity,
                   (s64)(ktime_get_real_seconds() - lock->l_last_activity),
                   obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);

        return 0;
}

/**
 * Calculate the Completion timeout (covering enqueue, BL AST, data flush,
 * lock cancel, and their replies). Used for lock completion timeout on the
 * client side.
 *
 * \param[in] lock      lock which is waiting for the completion callback
 *
 * \retval              timeout in seconds to wait for the server reply
 */
/* We use the same basis for both server-side and client-side functions
 * from a single node.
 */
static unsigned int ldlm_cp_timeout(struct ldlm_lock *lock)
{
        unsigned int timeout;

        if (AT_OFF)
                return obd_timeout;

        /*
         * Wait a long time for enqueue - server may have to callback a
         * lock from another client.  Server will evict the other client if it
         * doesn't respond reasonably, and then give us the lock.
         */
        timeout = at_get(ldlm_lock_to_ns_at(lock));
        return max(3 * timeout, ldlm_enqueue_min);
}
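
/*
 * Worked example (illustrative): with adaptive timeouts on and an AT
 * estimate of, say, 10 seconds for this namespace, the result is
 * max(3 * 10, ldlm_enqueue_min). The factor of 3 leaves the server room
 * to send a blocking callback to a conflicting client and, if need be,
 * wait out that client's eviction before granting us the lock.
 */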

/**
 * Helper function for ldlm_completion_ast(), updating timings when lock is
 * actually granted.
 */
static int ldlm_completion_tail(struct ldlm_lock *lock, void *data)
{
        long delay;
        int result = 0;

        if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
                LDLM_DEBUG(lock, "client-side enqueue: destroyed");
                result = -EIO;
        } else if (!data) {
                LDLM_DEBUG(lock, "client-side enqueue: granted");
        } else {
                /* Feed only CP RPC delays into AT, not immediately granted locks */
                delay = ktime_get_real_seconds() - lock->l_last_activity;
                LDLM_DEBUG(lock, "client-side enqueue: granted after %lds",
                           delay);

                /* Update our time estimate */
                at_measured(ldlm_lock_to_ns_at(lock), delay);
        }
        return result;
}

/**
 * Implementation of ->l_completion_ast() for a client that doesn't wait
 * until the lock is granted. Suitable for locks enqueued through ptlrpcd or
 * other threads that cannot block for long.
 */
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                return 0;
        }

        if (!(flags & LDLM_FL_BLOCKED_MASK)) {
                wake_up(&lock->l_waitq);
                return ldlm_completion_tail(lock, data);
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, going forward");
        return 0;
}
EXPORT_SYMBOL(ldlm_completion_ast_async);

/**
 * Generic LDLM "completion" AST. This is called in several cases:
 *
 *     - when a reply to an ENQUEUE RPC is received from the server
 *       (ldlm_cli_enqueue_fini()). Lock might be granted or not granted at
 *       this point (determined by flags);
 *
 *     - when LDLM_CP_CALLBACK RPC comes to client to notify it that lock has
 *       been granted;
 *
 *     - when ldlm_lock_match(LDLM_FL_LVB_READY) is about to wait until lock
 *       gets correct lvb;
 *
 *     - to force all locks when resource is destroyed (cleanup_resource());
 *
 *     - during lock conversion (not used currently).
 *
 * If the lock is not granted in the first case, this function waits until
 * the second or the penultimate case happens in some other thread.
 */
int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
{
        /* XXX ALLOCATE - 160 bytes */
        struct lock_wait_data lwd;
        struct obd_device *obd;
        struct obd_import *imp = NULL;
        struct l_wait_info lwi;
        __u32 timeout;
        int rc = 0;

        if (flags == LDLM_FL_WAIT_NOREPROC) {
                LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
                goto noreproc;
        }

        if (!(flags & LDLM_FL_BLOCKED_MASK)) {
                wake_up(&lock->l_waitq);
                return 0;
        }

        LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, sleeping");

noreproc:

        obd = class_exp2obd(lock->l_conn_export);

        /* if this is a local lock, then there is no import */
        if (obd)
                imp = obd->u.cli.cl_import;

        timeout = ldlm_cp_timeout(lock);

        lwd.lwd_lock = lock;
        lock->l_last_activity = ktime_get_real_seconds();

        if (ldlm_is_no_timeout(lock)) {
                LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
                lwi = LWI_INTR(interrupted_completion_wait, &lwd);
        } else {
                lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
                                       ldlm_expired_completion_wait,
                                       interrupted_completion_wait, &lwd);
        }

        if (imp) {
                spin_lock(&imp->imp_lock);
                lwd.lwd_conn_cnt = imp->imp_conn_cnt;
                spin_unlock(&imp->imp_lock);
        }

        if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
                                 OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
                ldlm_set_fail_loc(lock);
                rc = -EINTR;
        } else {
                /* Go to sleep until the lock is granted or cancelled. */
                rc = l_wait_event(lock->l_waitq,
                                  is_granted_or_cancelled(lock), &lwi);
        }

        if (rc) {
                LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
                           rc);
                return rc;
        }

        return ldlm_completion_tail(lock, data);
}
EXPORT_SYMBOL(ldlm_completion_ast);

static void failed_lock_cleanup(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock, int mode)
{
        int need_cancel = 0;

        /* Set a flag to prevent us from sending a CANCEL (bug 407) */
        lock_res_and_lock(lock);
        /* Check that the lock is not granted or failed; we might race. */
        if ((lock->l_req_mode != lock->l_granted_mode) &&
            !ldlm_is_failed(lock)) {
                /* Make sure that this lock will not be found by a racing
                 * bl_ast and that an -EINVAL reply is sent to the server
                 * anyway. bug 17645
                 */
                lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_FAILED |
                                 LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING;
                need_cancel = 1;
        }
        unlock_res_and_lock(lock);

        if (need_cancel)
                LDLM_DEBUG(lock,
                           "setting FL_LOCAL_ONLY | LDLM_FL_FAILED | LDLM_FL_ATOMIC_CB | LDLM_FL_CBPENDING");
        else
                LDLM_DEBUG(lock, "lock was granted or failed in race");

        /* XXX - HACK because we shouldn't call ldlm_lock_destroy()
         *       from llite/file.c/ll_file_flock().
         */
        /* This code accounts for the fact that we do not have a blocking
         * handler on a client for flock locks. As such, this is the place
         * where we must completely kill failed locks (both interrupted ones
         * and those that were waiting to be granted when the server evicted
         * us).
         */
        if (lock->l_resource->lr_type == LDLM_FLOCK) {
                lock_res_and_lock(lock);
                if (!ldlm_is_destroyed(lock)) {
                        ldlm_resource_unlink_lock(lock);
                        ldlm_lock_decref_internal_nolock(lock, mode);
                        ldlm_lock_destroy_nolock(lock);
                }
                unlock_res_and_lock(lock);
        } else {
                ldlm_lock_decref_internal(lock, mode);
        }
}

/**
 * Finishing portion of client lock enqueue code.
 *
 * Called after receiving reply from server.
 */
int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
                          enum ldlm_type type, __u8 with_policy,
                          enum ldlm_mode mode,
                          __u64 *flags, void *lvb, __u32 lvb_len,
                          const struct lustre_handle *lockh, int rc)
{
        struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
        int is_replay = *flags & LDLM_FL_REPLAY;
        struct ldlm_lock *lock;
        struct ldlm_reply *reply;
        int cleanup_phase = 1;

        lock = ldlm_handle2lock(lockh);
        /* ldlm_cli_enqueue is holding a reference on this lock. */
        if (!lock) {
                LASSERT(type == LDLM_FLOCK);
                return -ENOLCK;
        }

        LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
                 "lvb_len = %d, l_lvb_len = %d\n", lvb_len, lock->l_lvb_len);

        if (rc != ELDLM_OK) {
                LASSERT(!is_replay);
                LDLM_DEBUG(lock, "client-side enqueue END (%s)",
                           rc == ELDLM_LOCK_ABORTED ? "ABORTED" : "FAILED");

                if (rc != ELDLM_LOCK_ABORTED)
                        goto cleanup;
        }

        /* Before we return, swab the reply */
        reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
        if (!reply) {
                rc = -EPROTO;
                goto cleanup;
        }

        if (lvb_len > 0) {
                int size = 0;

                size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
                                            RCL_SERVER);
                if (size < 0) {
                        LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", size);
                        rc = size;
                        goto cleanup;
                } else if (unlikely(size > lvb_len)) {
                        LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
                                   lvb_len, size);
                        rc = -EINVAL;
                        goto cleanup;
                }
                lvb_len = size;
        }

        if (rc == ELDLM_LOCK_ABORTED) {
                if (lvb_len > 0 && lvb)
                        rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
                                           lvb, lvb_len);
                if (rc == 0)
                        rc = ELDLM_LOCK_ABORTED;
                goto cleanup;
        }

        /* lock enqueued on the server */
        cleanup_phase = 0;

        lock_res_and_lock(lock);
        /* Key change: rehash the lock in the per-export hash with the new key */
        if (exp->exp_lock_hash) {
                /* In the function below, .hs_keycmp resolves to
                 * ldlm_export_lock_keycmp()
                 */
                /* coverity[overrun-buffer-val] */
                cfs_hash_rehash_key(exp->exp_lock_hash,
                                    &lock->l_remote_handle,
                                    &reply->lock_handle,
                                    &lock->l_exp_hash);
        } else {
                lock->l_remote_handle = reply->lock_handle;
        }

        *flags = ldlm_flags_from_wire(reply->lock_flags);
        lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
                                              LDLM_FL_INHERIT_MASK);
        unlock_res_and_lock(lock);

        CDEBUG(D_INFO, "local: %p, remote cookie: %#llx, flags: 0x%llx\n",
               lock, reply->lock_handle.cookie, *flags);

        /* If enqueue returned a blocked lock but the completion handler has
         * already run, then it fixed up the resource and we don't need to do it
         * again.
         */
        if ((*flags) & LDLM_FL_LOCK_CHANGED) {
                int newmode = reply->lock_desc.l_req_mode;

                LASSERT(!is_replay);
                if (newmode && newmode != lock->l_req_mode) {
                        LDLM_DEBUG(lock, "server returned different mode %s",
                                   ldlm_lockname[newmode]);
                        lock->l_req_mode = newmode;
                }

                if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name,
                                 &lock->l_resource->lr_name)) {
                        CDEBUG(D_INFO, "remote intent success, locking "DLDLMRES
                                       " instead of "DLDLMRES"\n",
                               PLDLMRES(&reply->lock_desc.l_resource),
                               PLDLMRES(lock->l_resource));

                        rc = ldlm_lock_change_resource(ns, lock,
                                        &reply->lock_desc.l_resource.lr_name);
                        if (rc || !lock->l_resource) {
                                rc = -ENOMEM;
                                goto cleanup;
                        }
                        LDLM_DEBUG(lock, "client-side enqueue, new resource");
                }
                if (with_policy)
                        if (!(type == LDLM_IBITS &&
                              !(exp_connect_flags(exp) & OBD_CONNECT_IBITS)))
                                /* We assume lock type cannot change on server */
                                ldlm_convert_policy_to_local(exp,
                                                lock->l_resource->lr_type,
                                                &reply->lock_desc.l_policy_data,
                                                &lock->l_policy_data);
                if (type != LDLM_PLAIN)
                        LDLM_DEBUG(lock,
                                   "client-side enqueue, new policy data");
        }

        if ((*flags) & LDLM_FL_AST_SENT ||
            /* Cancel extent locks as soon as possible on a liblustre client,
             * because it cannot handle asynchronous ASTs robustly (see
             * bug 7311).
             */
            (LIBLUSTRE_CLIENT && type == LDLM_EXTENT)) {
                lock_res_and_lock(lock);
                lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
                unlock_res_and_lock(lock);
                LDLM_DEBUG(lock, "enqueue reply includes blocking AST");
        }

        /* If the lock has already been granted by a completion AST, don't
         * clobber the LVB with an older one.
         */
        if (lvb_len > 0) {
                /* We must lock or a racing completion might update lvb without
                 * letting us know and we'll clobber the correct value.
                 * Cannot unlock after the check either, as that still leaves
                 * a tiny window for completion to get in.
                 */
                lock_res_and_lock(lock);
                if (lock->l_req_mode != lock->l_granted_mode)
                        rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
                                           lock->l_lvb_data, lvb_len);
                unlock_res_and_lock(lock);
                if (rc < 0) {
                        cleanup_phase = 1;
                        goto cleanup;
                }
        }

        if (!is_replay) {
                rc = ldlm_lock_enqueue(ns, &lock, NULL, flags);
                if (lock->l_completion_ast) {
                        int err = lock->l_completion_ast(lock, *flags, NULL);

                        if (!rc)
                                rc = err;
                        if (rc)
                                cleanup_phase = 1;
                }
        }

        if (lvb_len > 0 && lvb) {
                /* Copy the LVB here, and not earlier, because the completion
                 * AST (if any) can override what we got in the reply
                 */
                memcpy(lvb, lock->l_lvb_data, lvb_len);
        }

        LDLM_DEBUG(lock, "client-side enqueue END");
cleanup:
        if (cleanup_phase == 1 && rc)
                failed_lock_cleanup(ns, lock, mode);
        /* Put the lock twice; the second reference is held by ldlm_cli_enqueue */
        LDLM_LOCK_PUT(lock);
        LDLM_LOCK_RELEASE(lock);
        return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue_fini);

/**
 * Estimate the number of lock handles that would fit into a request of the
 * given size.  PAGE_SIZE-512 is to allow TCP/IP and LNET headers to fit into
 * a single page on the send/receive side. XXX: 512 should be changed to a
 * more adequate value.
 */
static inline int ldlm_req_handles_avail(int req_size, int off)
{
        int avail;

        avail = min_t(int, LDLM_MAXREQSIZE, PAGE_SIZE - 512) - req_size;
        if (likely(avail >= 0))
                avail /= (int)sizeof(struct lustre_handle);
        else
                avail = 0;
        avail += LDLM_LOCKREQ_HANDLES - off;

        return avail;
}
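
/*
 * Worked example (illustrative, assuming LDLM_MAXREQSIZE > PAGE_SIZE - 512,
 * a 4096-byte PAGE_SIZE and an 8-byte struct lustre_handle): for
 * req_size = 1024 and off = 0,
 *
 *	avail = (4096 - 512 - 1024) / 8 + LDLM_LOCKREQ_HANDLES
 *	      = 320 + LDLM_LOCKREQ_HANDLES
 *
 * i.e. the spare space converts to 320 extra handles on top of the slots
 * already present in the base ldlm_request.
 */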

static inline int ldlm_capsule_handles_avail(struct req_capsule *pill,
                                             enum req_location loc,
                                             int off)
{
        u32 size = req_capsule_msg_size(pill, loc);

        return ldlm_req_handles_avail(size, off);
}

static inline int ldlm_format_handles_avail(struct obd_import *imp,
                                            const struct req_format *fmt,
                                            enum req_location loc, int off)
{
        u32 size = req_capsule_fmt_size(imp->imp_msg_magic, fmt, loc);

        return ldlm_req_handles_avail(size, off);
}

/**
 * Cancel LRU locks and pack them into the enqueue request. Also pack the
 * given \a count locks from \a cancels.
 *
 * This is to be called by functions preparing their own requests that
 * might contain lists of locks to cancel in addition to the actual operation
 * that needs to be performed.
 */
int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
                      int version, int opc, int canceloff,
                      struct list_head *cancels, int count)
{
        struct ldlm_namespace   *ns = exp->exp_obd->obd_namespace;
        struct req_capsule      *pill = &req->rq_pill;
        struct ldlm_request     *dlm = NULL;
        int flags, avail, to_free, pack = 0;
        LIST_HEAD(head);
        int rc;

        if (!cancels)
                cancels = &head;
        if (ns_connect_cancelset(ns)) {
                /* Estimate the amount of available space in the request. */
                req_capsule_filled_sizes(pill, RCL_CLIENT);
                avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);

                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
                to_free = !ns_connect_lru_resize(ns) &&
                          opc == LDLM_ENQUEUE ? 1 : 0;

                /* Cancel LRU locks here _only_ if the server supports
                 * EARLY_CANCEL. Otherwise we have to send an extra CANCEL
                 * RPC, which will make us slower.
                 */
                if (avail > count)
                        count += ldlm_cancel_lru_local(ns, cancels, to_free,
                                                       avail - count, 0, flags);
                if (avail > count)
                        pack = count;
                else
                        pack = avail;
                req_capsule_set_size(pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(pack, opc));
        }

        rc = ptlrpc_request_pack(req, version, opc);
        if (rc) {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
                return rc;
        }

        if (ns_connect_cancelset(ns)) {
                if (canceloff) {
                        dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
                        LASSERT(dlm);
                        /* Skip the first lock handle in ldlm_request_pack();
                         * this method will increment @lock_count according
                         * to the number of lock handles actually written to
                         * the buffer.
                         */
                        dlm->lock_count = canceloff;
                }
                /* Pack into the request @pack lock handles. */
                ldlm_cli_cancel_list(cancels, pack, req, 0);
                /* Prepare and send a separate cancel RPC for the others. */
                ldlm_cli_cancel_list(cancels, count - pack, NULL, 0);
        } else {
                ldlm_lock_list_put(cancels, l_bl_ast, count);
        }
        return 0;
}
EXPORT_SYMBOL(ldlm_prep_elc_req);

int ldlm_prep_enqueue_req(struct obd_export *exp, struct ptlrpc_request *req,
                          struct list_head *cancels, int count)
{
        return ldlm_prep_elc_req(exp, req, LUSTRE_DLM_VERSION, LDLM_ENQUEUE,
                                 LDLM_ENQUEUE_CANCEL_OFF, cancels, count);
}
EXPORT_SYMBOL(ldlm_prep_enqueue_req);
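
/*
 * Usage sketch (hypothetical caller): a function building its own request
 * can let early lock cancellation piggy-back on it. The exact gathering
 * call is illustrative only; ldlm_cancel_resource_local() is one of the
 * helpers mentioned in the LRU comments later in this file.
 *
 *	LIST_HEAD(cancels);
 *	int count;
 *
 *	count = ldlm_cancel_resource_local(...);	 (gather cancels)
 *	rc = ldlm_prep_enqueue_req(exp, req, &cancels, count);
 *
 * On error the cancel list has already been released; on success up to
 * "pack" handles travel inside the ENQUEUE request itself and the rest go
 * out in a separate CANCEL RPC.
 */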

/**
 * Client-side lock enqueue.
 *
 * If a request has some specific initialisation it is passed in \a reqp,
 * otherwise it is created in ldlm_cli_enqueue.
 *
 * Supports sync and async requests; pass the \a async flag accordingly. If a
 * request was created in ldlm_cli_enqueue and it is an async request, it is
 * passed to the caller in \a reqp.
 */
int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
                     struct ldlm_enqueue_info *einfo,
                     const struct ldlm_res_id *res_id,
                     ldlm_policy_data_t const *policy, __u64 *flags,
                     void *lvb, __u32 lvb_len, enum lvb_type lvb_type,
                     struct lustre_handle *lockh, int async)
{
        struct ldlm_namespace *ns;
        struct ldlm_lock      *lock;
        struct ldlm_request   *body;
        int                 is_replay = *flags & LDLM_FL_REPLAY;
        int                 req_passed_in = 1;
        int                 rc, err;
        struct ptlrpc_request *req;

        ns = exp->exp_obd->obd_namespace;

        /* If we're replaying this lock, just check some invariants.
         * If we're creating a new lock, get everything all setup nicely.
         */
        if (is_replay) {
                lock = ldlm_handle2lock_long(lockh, 0);
                LASSERT(lock);
                LDLM_DEBUG(lock, "client-side enqueue START");
                LASSERT(exp == lock->l_conn_export);
        } else {
                const struct ldlm_callback_suite cbs = {
                        .lcs_completion = einfo->ei_cb_cp,
                        .lcs_blocking   = einfo->ei_cb_bl,
                        .lcs_glimpse    = einfo->ei_cb_gl
                };
                lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
                                        einfo->ei_mode, &cbs, einfo->ei_cbdata,
                                        lvb_len, lvb_type);
                if (IS_ERR(lock))
                        return PTR_ERR(lock);
                /* for the local lock, add the reference */
                ldlm_lock_addref_internal(lock, einfo->ei_mode);
                ldlm_lock2handle(lock, lockh);
                if (policy)
                        lock->l_policy_data = *policy;

                if (einfo->ei_type == LDLM_EXTENT) {
                        /* extent lock without policy is a bug */
                        if (!policy)
                                LBUG();

                        lock->l_req_extent = policy->l_extent;
                }
                LDLM_DEBUG(lock, "client-side enqueue START, flags %llx",
                           *flags);
        }

        lock->l_conn_export = exp;
        lock->l_export = NULL;
        lock->l_blocking_ast = einfo->ei_cb_bl;
        lock->l_flags |= (*flags & (LDLM_FL_NO_LRU | LDLM_FL_EXCL));
        lock->l_last_activity = ktime_get_real_seconds();

        /* lock not sent to server yet */

        if (!reqp || !*reqp) {
                req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
                                                &RQF_LDLM_ENQUEUE,
                                                LUSTRE_DLM_VERSION,
                                                LDLM_ENQUEUE);
                if (!req) {
                        failed_lock_cleanup(ns, lock, einfo->ei_mode);
                        LDLM_LOCK_RELEASE(lock);
                        return -ENOMEM;
                }
                req_passed_in = 0;
                if (reqp)
                        *reqp = req;
        } else {
                int len;

                req = *reqp;
                len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ,
                                           RCL_CLIENT);
                LASSERTF(len >= sizeof(*body), "buflen[%d] = %d, not %d\n",
                         DLM_LOCKREQ_OFF, len, (int)sizeof(*body));
        }

        /* Dump lock data into the request buffer */
        body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        ldlm_lock2desc(lock, &body->lock_desc);
        body->lock_flags = ldlm_flags_to_wire(*flags);
        body->lock_handle[0] = *lockh;

        /* Continue as normal. */
        if (!req_passed_in) {
                if (lvb_len > 0)
                        req_capsule_extend(&req->rq_pill,
                                           &RQF_LDLM_ENQUEUE_LVB);
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                                     lvb_len);
                ptlrpc_request_set_replen(req);
        }

        /*
         * Liblustre client doesn't get extent locks, except for O_APPEND case
         * where [0, OBD_OBJECT_EOF] lock is taken, or truncate, where
         * [i_size, OBD_OBJECT_EOF] lock is taken.
         */
        LASSERT(ergo(LIBLUSTRE_CLIENT, einfo->ei_type != LDLM_EXTENT ||
                     policy->l_extent.end == OBD_OBJECT_EOF));

        if (async) {
                LASSERT(reqp);
                return 0;
        }

        LDLM_DEBUG(lock, "sending request");

        rc = ptlrpc_queue_wait(req);

        err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0,
                                    einfo->ei_mode, flags, lvb, lvb_len,
                                    lockh, rc);

        /* If ldlm_cli_enqueue_fini did not find the lock, we need to free
         * one reference that we took.
         */
        if (err == -ENOLCK)
                LDLM_LOCK_RELEASE(lock);
        else
                rc = err;

        if (!req_passed_in && req) {
                ptlrpc_req_finished(req);
                if (reqp)
                        *reqp = NULL;
        }

        return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue);
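
/*
 * Usage sketch (illustrative, synchronous): everything except the ldlm_*
 * names is made up.
 *
 *	struct lustre_handle lockh;
 *	__u64 flags = 0;
 *	struct ldlm_enqueue_info einfo = {
 *		.ei_type = LDLM_EXTENT,
 *		.ei_mode = LCK_PW,
 *		.ei_cb_bl = my_blocking_ast,	 (hypothetical)
 *		.ei_cb_cp = ldlm_completion_ast,
 *	};
 *
 *	rc = ldlm_cli_enqueue(exp, NULL, &einfo, &res_id, &policy, &flags,
 *			      NULL, 0, LVB_T_NONE, &lockh, 0);
 *
 * With async = 0 the call returns only after ldlm_cli_enqueue_fini() has
 * processed the server reply.
 */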

/**
 * Cancel locks locally.
 *
 * \retval LDLM_FL_LOCAL_ONLY if there is no need for a CANCEL RPC to the server
 * \retval LDLM_FL_CANCELING otherwise
 * \retval LDLM_FL_BL_AST if there is a need for a separate CANCEL RPC
 */
static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
        __u64 rc = LDLM_FL_LOCAL_ONLY;

        if (lock->l_conn_export) {
                bool local_only;

                LDLM_DEBUG(lock, "client-side cancel");
                /* Set this flag to prevent others from getting new references */
                lock_res_and_lock(lock);
                ldlm_set_cbpending(lock);
                local_only = !!(lock->l_flags &
                                (LDLM_FL_LOCAL_ONLY | LDLM_FL_CANCEL_ON_BLOCK));
                ldlm_cancel_callback(lock);
                rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
                unlock_res_and_lock(lock);

                if (local_only) {
                        CDEBUG(D_DLMTRACE, "not sending request (at caller's instruction)\n");
                        rc = LDLM_FL_LOCAL_ONLY;
                }
                ldlm_lock_cancel(lock);
        } else {
                LDLM_ERROR(lock, "Trying to cancel local lock");
                LBUG();
        }

        return rc;
}

/**
 * Pack \a count locks in \a head into ldlm_request buffer of request \a req.
 */
static void ldlm_cancel_pack(struct ptlrpc_request *req,
                             struct list_head *head, int count)
{
        struct ldlm_request *dlm;
        struct ldlm_lock *lock;
        int max, packed = 0;

        dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
        LASSERT(dlm);

        /* Check the room in the request buffer. */
        max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
                sizeof(struct ldlm_request);
        max /= sizeof(struct lustre_handle);
        max += LDLM_LOCKREQ_HANDLES;
        LASSERT(max >= dlm->lock_count + count);

        /* XXX: it would be better to pack lock handles grouped by resource,
         * so that the server cancel would call filter_lvbo_update() less
         * frequently.
         */
        list_for_each_entry(lock, head, l_bl_ast) {
                if (!count--)
                        break;
                LASSERT(lock->l_conn_export);
                /* Pack the lock handle into the given request buffer. */
                LDLM_DEBUG(lock, "packing");
                dlm->lock_handle[dlm->lock_count++] = lock->l_remote_handle;
                packed++;
        }
        CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
}

/**
 * Prepare and send a batched cancel RPC. It will include \a count lock
 * handles of locks given in the \a cancels list.
 */
static int ldlm_cli_cancel_req(struct obd_export *exp,
                               struct list_head *cancels,
                               int count, enum ldlm_cancel_flags flags)
{
        struct ptlrpc_request *req = NULL;
        struct obd_import *imp;
        int free, sent = 0;
        int rc = 0;

        LASSERT(exp);
        LASSERT(count > 0);

        CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);

        if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
                return count;

        free = ldlm_format_handles_avail(class_exp2cliimp(exp),
                                         &RQF_LDLM_CANCEL, RCL_CLIENT, 0);
        if (count > free)
                count = free;

        while (1) {
                imp = class_exp2cliimp(exp);
                if (!imp || imp->imp_invalid) {
                        CDEBUG(D_DLMTRACE,
                               "skipping cancel on invalid import %p\n", imp);
                        return count;
                }

                req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
                if (!req) {
                        rc = -ENOMEM;
                        goto out;
                }

                req_capsule_filled_sizes(&req->rq_pill, RCL_CLIENT);
                req_capsule_set_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT,
                                     ldlm_request_bufsize(count, LDLM_CANCEL));

                rc = ptlrpc_request_pack(req, LUSTRE_DLM_VERSION, LDLM_CANCEL);
                if (rc) {
                        ptlrpc_request_free(req);
                        goto out;
                }

                req->rq_request_portal = LDLM_CANCEL_REQUEST_PORTAL;
                req->rq_reply_portal = LDLM_CANCEL_REPLY_PORTAL;
                ptlrpc_at_set_req_timeout(req);

                ldlm_cancel_pack(req, cancels, count);

                ptlrpc_request_set_replen(req);
                if (flags & LCF_ASYNC) {
                        ptlrpcd_add_req(req);
                        sent = count;
                        goto out;
                }

                rc = ptlrpc_queue_wait(req);
                if (rc == LUSTRE_ESTALE) {
                        CDEBUG(D_DLMTRACE, "client/server (nid %s) out of sync -- not fatal\n",
                               libcfs_nid2str(req->rq_import->
                                              imp_connection->c_peer.nid));
                        rc = 0;
                } else if (rc == -ETIMEDOUT && /* check there was no reconnect */
                           req->rq_import_generation == imp->imp_generation) {
                        ptlrpc_req_finished(req);
                        continue;
                } else if (rc != ELDLM_OK) {
                        /* -ESHUTDOWN is common on umount */
                        CDEBUG_LIMIT(rc == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
                                     "Got rc %d from cancel RPC: canceling anyway\n",
                                     rc);
                        break;
                }
                sent = count;
                break;
        }

        ptlrpc_req_finished(req);
out:
        return sent ? sent : rc;
}

static inline struct ldlm_pool *ldlm_imp2pl(struct obd_import *imp)
{
        return &imp->imp_obd->obd_namespace->ns_pool;
}

/**
 * Update client's OBD pool related fields with new SLV and Limit from \a req.
 */
int ldlm_cli_update_pool(struct ptlrpc_request *req)
{
        struct obd_device *obd;
        __u64 new_slv;
        __u32 new_limit;

        if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
                     !imp_connect_lru_resize(req->rq_import))) {
                /*
                 * Do nothing for corner cases.
                 */
                return 0;
        }

        /* In some cases the RPC may contain SLV and limit zeroed out. This
         * is the case when the server does not support the LRU resize
         * feature. It is also possible in some recovery cases when
         * server-side reqs have no reference to the OBD export and thus
         * access to the server-side namespace is not possible.
         */
        if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
            lustre_msg_get_limit(req->rq_repmsg) == 0) {
                DEBUG_REQ(D_HA, req,
                          "Zero SLV or Limit found (SLV: %llu, Limit: %u)",
                          lustre_msg_get_slv(req->rq_repmsg),
                          lustre_msg_get_limit(req->rq_repmsg));
                return 0;
        }

        new_limit = lustre_msg_get_limit(req->rq_repmsg);
        new_slv = lustre_msg_get_slv(req->rq_repmsg);
        obd = req->rq_import->imp_obd;

        /* Set the new SLV and limit in OBD fields to make them accessible
         * to the pool thread. We do not access obd_namespace and pool
         * directly here as there is no reliable way to make sure that
         * they are still alive at cleanup time. Evil races are possible
         * which may cause an oops at that time.
         */
        write_lock(&obd->obd_pool_lock);
        obd->obd_pool_slv = new_slv;
        obd->obd_pool_limit = new_limit;
        write_unlock(&obd->obd_pool_lock);

        return 0;
}
EXPORT_SYMBOL(ldlm_cli_update_pool);

/**
 * Client side lock cancel.
 *
 * Lock must not have any readers or writers by this time.
 */
int ldlm_cli_cancel(const struct lustre_handle *lockh,
                    enum ldlm_cancel_flags cancel_flags)
{
        struct obd_export *exp;
        int avail, flags, count = 1;
        __u64 rc = 0;
        struct ldlm_namespace *ns;
        struct ldlm_lock *lock;
        LIST_HEAD(cancels);

        /* concurrent cancels on the same handle can happen */
        lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
        if (!lock) {
                LDLM_DEBUG_NOLOCK("lock is already being destroyed");
                return 0;
        }

        rc = ldlm_cli_cancel_local(lock);
        if (rc == LDLM_FL_LOCAL_ONLY || cancel_flags & LCF_LOCAL) {
                LDLM_LOCK_RELEASE(lock);
                return 0;
        }
        /* Even if the lock is marked as LDLM_FL_BL_AST, this is an LDLM_CANCEL
         * RPC, which goes to the canceld portal, so we can cancel other LRU
         * locks here and send them all as one LDLM_CANCEL RPC.
         */
        LASSERT(list_empty(&lock->l_bl_ast));
        list_add(&lock->l_bl_ast, &cancels);

        exp = lock->l_conn_export;
        if (exp_connect_cancelset(exp)) {
                avail = ldlm_format_handles_avail(class_exp2cliimp(exp),
                                                  &RQF_LDLM_CANCEL,
                                                  RCL_CLIENT, 0);
                LASSERT(avail > 0);

                ns = ldlm_lock_to_ns(lock);
                flags = ns_connect_lru_resize(ns) ?
                        LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
                count += ldlm_cancel_lru_local(ns, &cancels, 0, avail - 1,
                                               LCF_BL_AST, flags);
        }
        ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
        return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel);
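
/*
 * Usage sketch (illustrative): a caller done with a lock it enqueued would
 * typically do
 *
 *	struct lustre_handle lockh;
 *
 *	ldlm_lock2handle(lock, &lockh);
 *	rc = ldlm_cli_cancel(&lockh, 0);
 *
 * Passing LCF_ASYNC instead of 0 lets the resulting CANCEL RPC be sent via
 * ptlrpcd instead of being waited upon.
 */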

/**
 * Locally cancel up to \a count locks in list \a cancels.
 * Return the number of cancelled locks.
 */
int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
                               enum ldlm_cancel_flags flags)
{
        LIST_HEAD(head);
        struct ldlm_lock *lock, *next;
        int left = 0, bl_ast = 0;
        __u64 rc;

        left = count;
        list_for_each_entry_safe(lock, next, cancels, l_bl_ast) {
                if (left-- == 0)
                        break;

                if (flags & LCF_LOCAL) {
                        rc = LDLM_FL_LOCAL_ONLY;
                        ldlm_lock_cancel(lock);
                } else {
                        rc = ldlm_cli_cancel_local(lock);
                }
                /* Until we have compound requests and can send LDLM_CANCEL
                 * requests batched with generic RPCs, we need to send cancels
                 * with the LDLM_FL_BL_AST flag in a separate RPC from
                 * the one being generated now.
                 */
                if (!(flags & LCF_BL_AST) && (rc == LDLM_FL_BL_AST)) {
                        LDLM_DEBUG(lock, "Cancel lock separately");
                        list_del_init(&lock->l_bl_ast);
                        list_add(&lock->l_bl_ast, &head);
                        bl_ast++;
                        continue;
                }
                if (rc == LDLM_FL_LOCAL_ONLY) {
                        /* CANCEL RPC should not be sent to server. */
                        list_del_init(&lock->l_bl_ast);
                        LDLM_LOCK_RELEASE(lock);
                        count--;
                }
        }
        if (bl_ast > 0) {
                count -= bl_ast;
                ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
        }

        return count;
}
EXPORT_SYMBOL(ldlm_cli_cancel_list_local);

/**
 * Cancel as many locks as possible w/o sending any RPCs (e.g. to write back
 * dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
 * readahead requests, ...)
 */
static enum ldlm_policy_res
ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                           int unused, int added, int count)
{
        enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;

        /* don't check added & count since we want to process all locks
         * from the unused list.
         * It's fine to not take the lock to access lock->l_resource since
         * the lock has already been granted so it won't change.
         */
        switch (lock->l_resource->lr_type) {
        case LDLM_EXTENT:
        case LDLM_IBITS:
                if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
                        break;
        default:
                result = LDLM_POLICY_SKIP_LOCK;
                lock_res_and_lock(lock);
                ldlm_set_skipped(lock);
                unlock_res_and_lock(lock);
                break;
        }

        return result;
}

/**
 * Callback function for LRU-resize policy. Decides whether to keep
 * \a lock in LRU for the current LRU size \a unused, number of locks
 * added in the current scan \a added and number of locks to be preferably
 * canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
                                                    struct ldlm_lock *lock,
                                                    int unused, int added,
                                                    int count)
{
        unsigned long cur = cfs_time_current();
        struct ldlm_pool *pl = &ns->ns_pool;
        __u64 slv, lvf, lv;
        unsigned long la;

        /* Stop LRU processing when we reach past @count or have checked all
         * locks in LRU.
         */
        if (count && added >= count)
                return LDLM_POLICY_KEEP_LOCK;

        slv = ldlm_pool_get_slv(pl);
        lvf = ldlm_pool_get_lvf(pl);
        la = cfs_duration_sec(cfs_time_sub(cur, lock->l_last_used));
        lv = lvf * la * unused;

        /* Inform pool about current CLV to see it via debugfs. */
        ldlm_pool_set_clv(pl, lv);

        /* Stop when the SLV has not yet been received from the server, or
         * when lv is smaller than the SLV.
         */
        if (slv == 0 || lv < slv)
                return LDLM_POLICY_KEEP_LOCK;

        return LDLM_POLICY_CANCEL_LOCK;
}
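
/*
 * Worked example (illustrative numbers): with lvf = 100, a lock last used
 * la = 30 seconds ago and unused = 1000 locks in the LRU, the lock value is
 * lv = 100 * 30 * 1000 = 3000000. The lock becomes a cancel candidate only
 * once lv reaches the server-supplied SLV; below that it is kept and the
 * LRU scan stops.
 */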

/**
 * Callback function for the policy exercised via debugfs. Decides whether
 * to keep \a lock in LRU for the current LRU size \a unused, number of
 * locks added in the current scan \a added and number of locks to be
 * preferably canceled \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
                                                      struct ldlm_lock *lock,
                                                      int unused, int added,
                                                      int count)
{
        /* Stop LRU processing when we reach past @count or have checked all
         * locks in LRU.
         */
        return (added >= count) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

/**
 * Callback function for aged policy. Decides whether to keep \a lock in
 * LRU for the current LRU size \a unused, number of locks added in the
 * current scan \a added and number of locks to be preferably canceled
 * \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
                                                    struct ldlm_lock *lock,
                                                    int unused, int added,
                                                    int count)
{
        if ((added >= count) &&
            time_before(cfs_time_current(),
                        cfs_time_add(lock->l_last_used, ns->ns_max_age)))
                return LDLM_POLICY_KEEP_LOCK;

        return LDLM_POLICY_CANCEL_LOCK;
}

static enum ldlm_policy_res
ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
                                struct ldlm_lock *lock,
                                int unused, int added,
                                int count)
{
        enum ldlm_policy_res result;

        result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
        if (result == LDLM_POLICY_KEEP_LOCK)
                return result;

        return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
}

/**
 * Callback function for default policy. Decides whether to keep \a lock
 * in LRU for the current LRU size \a unused, number of locks added in the
 * current scan \a added and number of locks to be preferably canceled
 * \a count.
 *
 * \retval LDLM_POLICY_KEEP_LOCK keep lock in LRU and stop scanning
 *
 * \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
 */
static enum ldlm_policy_res
ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
                           int unused, int added, int count)
{
        /* Stop LRU processing when we reach past count or have checked all
         * locks in LRU.
         */
        return (added >= count) ?
                LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}

typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
                                                      struct ldlm_namespace *,
                                                      struct ldlm_lock *, int,
                                                      int, int);

static ldlm_cancel_lru_policy_t
ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
{
        if (flags & LDLM_CANCEL_NO_WAIT)
                return ldlm_cancel_no_wait_policy;

        if (ns_connect_lru_resize(ns)) {
                if (flags & LDLM_CANCEL_SHRINK)
                        /* We kill passed number of old locks. */
                        return ldlm_cancel_passed_policy;
                else if (flags & LDLM_CANCEL_LRUR)
                        return ldlm_cancel_lrur_policy;
                else if (flags & LDLM_CANCEL_PASSED)
                        return ldlm_cancel_passed_policy;
                else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
                        return ldlm_cancel_lrur_no_wait_policy;
        } else {
                if (flags & LDLM_CANCEL_AGED)
                        return ldlm_cancel_aged_policy;
        }

        return ldlm_cancel_default_policy;
}

/**
 * - Free space in LRU for \a count new locks,
 *   redundant unused locks are canceled locally;
 * - also cancel locally unused aged locks;
 * - do not cancel more than \a max locks;
 * - GET the found locks and add them into the \a cancels list.
 *
 * A client lock can be added to the l_bl_ast list only when it is
 * marked LDLM_FL_CANCELING. Otherwise, somebody is already doing
 * CANCEL.  There are the following use cases:
 * ldlm_cancel_resource_local(), ldlm_cancel_lru_local() and
 * ldlm_cli_cancel(), which check and set this flag properly. As any
 * attempt to cancel a lock relies on this flag, the l_bl_ast list is
 * accessed later without any special locking.
 *
 * Calling policies for enabled LRU resize:
 * ----------------------------------------
 * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to
 *                          cancel not more than \a count locks;
 *
 * flags & LDLM_CANCEL_PASSED - cancel \a count number of old locks (located at
 *                            the beginning of LRU list);
 *
 * flags & LDLM_CANCEL_SHRINK - cancel not more than \a count locks according to
 *                            memory pressure policy function;
 *
 * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy".
 *
 * flags & LDLM_CANCEL_NO_WAIT - cancel as many unused locks as possible
 *                             (typically before replaying locks) w/o
 *                             sending any RPCs or waiting for any
 *                             outstanding RPC to complete.
 */
1344 static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
1345                                  struct list_head *cancels, int count, int max,
1346                                  int flags)
1347 {
1348         ldlm_cancel_lru_policy_t pf;
1349         struct ldlm_lock *lock, *next;
1350         int added = 0, unused, remained;
1351         int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
1352
1353         spin_lock(&ns->ns_lock);
1354         unused = ns->ns_nr_unused;
1355         remained = unused;
1356
1357         if (!ns_connect_lru_resize(ns))
1358                 count += unused - ns->ns_max_unused;
1359
1360         pf = ldlm_cancel_lru_policy(ns, flags);
1361         LASSERT(pf);
1362
1363         while (!list_empty(&ns->ns_unused_list)) {
1364                 enum ldlm_policy_res result;
1365                 time_t last_use = 0;
1366
1367                 /* Scanned all the locks that were unused at start. */
1368                 if (remained-- <= 0)
1369                         break;
1370
1371                 /* For any flags, stop scanning if @max is reached. */
1372                 if (max && added >= max)
1373                         break;
1374
1375                 list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
1376                                          l_lru) {
1377                         /* No locks which got blocking requests. */
1378                         LASSERT(!ldlm_is_bl_ast(lock));
1379
1380                         if (no_wait && ldlm_is_skipped(lock))
1381                                 /* already processed */
1382                                 continue;
1383
1384                         last_use = lock->l_last_used;
1385                         if (last_use == cfs_time_current())
1386                                 continue;
1387
1388                         /* Somebody is already doing CANCEL; such a lock
1389                          * is not needed in the LRU, do not traverse it again.
1390                          */
1391                         if (!ldlm_is_canceling(lock))
1392                                 break;
1393
1394                         ldlm_lock_remove_from_lru_nolock(lock);
1395                 }
1396                 if (&lock->l_lru == &ns->ns_unused_list)
1397                         break;
1398
1399                 LDLM_LOCK_GET(lock);
1400                 spin_unlock(&ns->ns_lock);
1401                 lu_ref_add(&lock->l_reference, __func__, current);
1402
1403                 /* Pass the lock through the policy filter and see if it
1404                  * should stay in LRU.
1405                  *
1406                  * Even for shrinker policy we stop scanning if
1407                  * we find a lock that should stay in the cache.
1408                  * We should take into account lock age anyway
1409                  * as a new lock is a valuable resource even if
1410                  * it has a low weight.
1411                  *
1412                  * That is, for shrinker policy we drop only
1413                  * old locks, but additionally choose them by
1414                  * their weight. Big extent locks will stay in
1415                  * the cache.
1416                  */
1417                 result = pf(ns, lock, unused, added, count);
1418                 if (result == LDLM_POLICY_KEEP_LOCK) {
1419                         lu_ref_del(&lock->l_reference,
1420                                    __func__, current);
1421                         LDLM_LOCK_RELEASE(lock);
1422                         spin_lock(&ns->ns_lock);
1423                         break;
1424                 }
1425                 if (result == LDLM_POLICY_SKIP_LOCK) {
1426                         lu_ref_del(&lock->l_reference,
1427                                    __func__, current);
1428                         LDLM_LOCK_RELEASE(lock);
1429                         spin_lock(&ns->ns_lock);
1430                         continue;
1431                 }
1432
1433                 lock_res_and_lock(lock);
1434                 /* Check flags again under the lock. */
1435                 if (ldlm_is_canceling(lock) ||
1436                     (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
1437                         /* Another thread is removing the lock from the
1438                          * LRU, or somebody is already doing CANCEL, or
1439                          * there is a blocking request which will send
1440                          * the cancel by itself, or the lock is no longer
1441                          * unused, or the lock has been used since the
1442                          * pf() call and pages could be put under it.
1443                          */
1444                         unlock_res_and_lock(lock);
1445                         lu_ref_del(&lock->l_reference,
1446                                    __func__, current);
1447                         LDLM_LOCK_RELEASE(lock);
1448                         spin_lock(&ns->ns_lock);
1449                         continue;
1450                 }
1451                 LASSERT(!lock->l_readers && !lock->l_writers);
1452
1453                 /* If we have chosen to cancel this lock voluntarily, we
1454                  * had better send a cancel notification to the server so
1455                  * that it frees the appropriate state. This might lead to
1456                  * a race where, while we are cancelling here, the server
1457                  * is also silently cancelling this lock.
1458                  */
1459                 ldlm_clear_cancel_on_block(lock);
1460
1461                 /* Setting the CBPENDING flag is a little misleading,
1462                  * but prevents an important race; namely, once
1463                  * CBPENDING is set, the lock can accumulate no more
1464                  * readers/writers. Since readers and writers are
1465                  * already zero here, ldlm_lock_decref() won't see
1466                  * this flag and call l_blocking_ast.
1467                  */
1468                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING;
1469
1470                 /* We can't re-add to l_lru as it confuses the
1471                  * refcounting in ldlm_lock_remove_from_lru() if an AST
1472                  * arrives after we drop lr_lock below. We use l_bl_ast
1473                  * instead, and can't use l_pending_chain, as it is used
1474                  * on both the server and the client, despite bug 5666
1475                  * claiming it is used only on the server.
1476                  */
1477                 LASSERT(list_empty(&lock->l_bl_ast));
1478                 list_add(&lock->l_bl_ast, cancels);
1479                 unlock_res_and_lock(lock);
1480                 lu_ref_del(&lock->l_reference, __func__, current);
1481                 spin_lock(&ns->ns_lock);
1482                 added++;
1483                 unused--;
1484         }
1485         spin_unlock(&ns->ns_lock);
1486         return added;
1487 }
1488
1489 int ldlm_cancel_lru_local(struct ldlm_namespace *ns,
1490                           struct list_head *cancels, int count, int max,
1491                           enum ldlm_cancel_flags cancel_flags, int flags)
1492 {
1493         int added;
1494
1495         added = ldlm_prepare_lru_list(ns, cancels, count, max, flags);
1496         if (added <= 0)
1497                 return added;
1498         return ldlm_cli_cancel_list_local(cancels, added, cancel_flags);
1499 }
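
/*
 * Illustrative sketch (not part of the original source): gather and
 * locally cancel up to 32 unused locks from a namespace LRU using the
 * "aged" policy (which applies when LRU resize is not connected). The
 * helper name is hypothetical.
 */
#if 0
static void example_trim_lru(struct ldlm_namespace *ns)
{
        LIST_HEAD(cancels);
        int canceled;

        /* count = 0, max = 32: cancel at most 32 locks; LCF_LOCAL skips
         * the CANCEL RPCs and throws the locks away locally.
         */
        canceled = ldlm_cancel_lru_local(ns, &cancels, 0, 32,
                                         LCF_LOCAL, LDLM_CANCEL_AGED);
        CDEBUG(D_DLMTRACE, "locally canceled %d locks\n", canceled);
}
#endif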
1500
1501 /**
1502  * Cancel at least \a nr locks from given namespace LRU.
1503  *
1504  * When called with LCF_ASYNC, the blocking callback will be handled
1505  * in a thread and this function will return after the thread has been
1506  * asked to call the callback.  When called without LCF_ASYNC, the
1507  * blocking callback will be performed in this function.
1508  */
1509 int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
1510                     enum ldlm_cancel_flags cancel_flags,
1511                     int flags)
1512 {
1513         LIST_HEAD(cancels);
1514         int count, rc;
1515
1516         /* Just prepare the list of locks, do not actually cancel them yet.
1517          * Locks are cancelled later in a separate thread.
1518          */
1519         count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
1520         rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
1521         if (rc == 0)
1522                 return count;
1523
1524         return 0;
1525 }
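
/*
 * Illustrative sketch (not part of the original source): request that at
 * least 128 unused locks be canceled, with the blocking callbacks handed
 * off to a blocking thread via LCF_ASYNC as described above.
 */
#if 0
static void example_shrink_lru(struct ldlm_namespace *ns)
{
        int count;

        /* Returns the number of locks queued for cancellation; the actual
         * cancels complete asynchronously in a blocking thread.
         */
        count = ldlm_cancel_lru(ns, 128, LCF_ASYNC, 0);
        CDEBUG(D_DLMTRACE, "queued %d locks for cancel\n", count);
}
#endif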
1526
1527 /**
1528  * Find and cancel locally-unused locks on the resource that match the
1529  * given policy and mode. GET the found locks and add them into the
1530  * \a cancels list.
1531  */
1532 int ldlm_cancel_resource_local(struct ldlm_resource *res,
1533                                struct list_head *cancels,
1534                                ldlm_policy_data_t *policy,
1535                                enum ldlm_mode mode, __u64 lock_flags,
1536                                enum ldlm_cancel_flags cancel_flags,
1537                                void *opaque)
1538 {
1539         struct ldlm_lock *lock;
1540         int count = 0;
1541
1542         lock_res(res);
1543         list_for_each_entry(lock, &res->lr_granted, l_res_link) {
1544                 if (opaque && lock->l_ast_data != opaque) {
1545                         LDLM_ERROR(lock, "data %p doesn't match opaque %p",
1546                                    lock->l_ast_data, opaque);
1547                         continue;
1548                 }
1549
1550                 if (lock->l_readers || lock->l_writers)
1551                         continue;
1552
1553                 /* If somebody is already doing CANCEL, or blocking AST came,
1554                  * skip this lock.
1555                  */
1556                 if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
1557                         continue;
1558
1559                 if (lockmode_compat(lock->l_granted_mode, mode))
1560                         continue;
1561
1562                 /* If policy is given and this is IBITS lock, add to list only
1563                  * those locks that match by policy.
1564                  */
1565                 if (policy && (lock->l_resource->lr_type == LDLM_IBITS) &&
1566                     !(lock->l_policy_data.l_inodebits.bits &
1567                       policy->l_inodebits.bits))
1568                         continue;
1569
1570                 /* See CBPENDING comment in ldlm_cancel_lru */
1571                 lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_CANCELING |
1572                                  lock_flags;
1573
1574                 LASSERT(list_empty(&lock->l_bl_ast));
1575                 list_add(&lock->l_bl_ast, cancels);
1576                 LDLM_LOCK_GET(lock);
1577                 count++;
1578         }
1579         unlock_res(res);
1580
1581         return ldlm_cli_cancel_list_local(cancels, count, cancel_flags);
1582 }
1583 EXPORT_SYMBOL(ldlm_cancel_resource_local);
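
/*
 * Illustrative sketch (not part of the original source): cancel unused
 * locks on one resource that conflict with an LCK_PW request, batching
 * the CANCEL RPC via LCF_BL_AST exactly as
 * ldlm_cli_cancel_unused_resource() below does.
 */
#if 0
static int example_cancel_conflicting(struct ldlm_resource *res)
{
        LIST_HEAD(cancels);
        int count;

        /* Gather granted, unused locks whose mode conflicts with LCK_PW;
         * LCF_BL_AST keeps them on the list for a batched server CANCEL.
         */
        count = ldlm_cancel_resource_local(res, &cancels, NULL, LCK_PW,
                                           0, LCF_BL_AST, NULL);
        /* Send one batched CANCEL RPC for whatever was gathered. */
        return ldlm_cli_cancel_list(&cancels, count, NULL, 0);
}
#endif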
1584
1585 /**
1586  * Cancel client-side locks from a list and send/prepare cancel RPCs to the
1587  * server.
1588  * If \a req is NULL, send a CANCEL request to the server with the handles
1589  * of the locks in \a cancels. If EARLY_CANCEL is not supported, send a
1590  * separate CANCEL request per lock.
1591  * If \a req is not NULL, put the handles of the locks in \a cancels into
1592  * the request buffer at offset \a off.
1593  * Destroy \a cancels at the end.
1594  */
1595 int ldlm_cli_cancel_list(struct list_head *cancels, int count,
1596                          struct ptlrpc_request *req,
1597                          enum ldlm_cancel_flags flags)
1598 {
1599         struct ldlm_lock *lock;
1600         int res = 0;
1601
1602         if (list_empty(cancels) || count == 0)
1603                 return 0;
1604
1605         /* XXX: requests (both batched and not) could be sent in parallel.
1606          * Usually it is enough to have just 1 RPC, but it is possible that
1607          * there are too many locks to be cancelled in LRU or on a resource.
1608          * It would also speed up the case when the server does not support
1609          * the feature.
1610          */
1611         while (count > 0) {
1612                 LASSERT(!list_empty(cancels));
1613                 lock = list_entry(cancels->next, struct ldlm_lock, l_bl_ast);
1614                 LASSERT(lock->l_conn_export);
1615
1616                 if (exp_connect_cancelset(lock->l_conn_export)) {
1617                         res = count;
1618                         if (req)
1619                                 ldlm_cancel_pack(req, cancels, count);
1620                         else
1621                                 res = ldlm_cli_cancel_req(lock->l_conn_export,
1622                                                           cancels, count,
1623                                                           flags);
1624                 } else {
1625                         res = ldlm_cli_cancel_req(lock->l_conn_export,
1626                                                   cancels, 1, flags);
1627                 }
1628
1629                 if (res < 0) {
1630                         CDEBUG_LIMIT(res == -ESHUTDOWN ? D_DLMTRACE : D_ERROR,
1631                                      "ldlm_cli_cancel_list: %d\n", res);
1632                         res = count;
1633                 }
1634
1635                 count -= res;
1636                 ldlm_lock_list_put(cancels, l_bl_ast, res);
1637         }
1638         LASSERT(count == 0);
1639         return 0;
1640 }
1641 EXPORT_SYMBOL(ldlm_cli_cancel_list);
1642
1643 /**
1644  * Cancel all locks on a resource that have 0 readers/writers.
1645  *
1646  * If flags & LCF_LOCAL, throw the locks away without trying
1647  * to notify the server.
1648  */
1649 int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
1650                                     const struct ldlm_res_id *res_id,
1651                                     ldlm_policy_data_t *policy,
1652                                     enum ldlm_mode mode,
1653                                     enum ldlm_cancel_flags flags,
1654                                     void *opaque)
1655 {
1656         struct ldlm_resource *res;
1657         LIST_HEAD(cancels);
1658         int count;
1659         int rc;
1660
1661         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1662         if (IS_ERR(res)) {
1663                 /* This is not a problem. */
1664                 CDEBUG(D_INFO, "No resource %llu\n", res_id->name[0]);
1665                 return 0;
1666         }
1667
1668         LDLM_RESOURCE_ADDREF(res);
1669         count = ldlm_cancel_resource_local(res, &cancels, policy, mode,
1670                                            0, flags | LCF_BL_AST, opaque);
1671         rc = ldlm_cli_cancel_list(&cancels, count, NULL, flags);
1672         if (rc != ELDLM_OK)
1673                 CERROR("canceling unused lock "DLDLMRES": rc = %d\n",
1674                        PLDLMRES(res), rc);
1675
1676         LDLM_RESOURCE_DELREF(res);
1677         ldlm_resource_putref(res);
1678         return 0;
1679 }
1680 EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
1681
1682 struct ldlm_cli_cancel_arg {
1683         int     lc_flags;
1684         void   *lc_opaque;
1685 };
1686
1687 static int ldlm_cli_hash_cancel_unused(struct cfs_hash *hs,
1688                                        struct cfs_hash_bd *bd,
1689                                        struct hlist_node *hnode, void *arg)
1690 {
1691         struct ldlm_resource       *res = cfs_hash_object(hs, hnode);
1692         struct ldlm_cli_cancel_arg     *lc = arg;
1693
1694         ldlm_cli_cancel_unused_resource(ldlm_res_to_ns(res), &res->lr_name,
1695                                         NULL, LCK_MINMODE,
1696                                         lc->lc_flags, lc->lc_opaque);
1697         /* must return 0 for hash iteration */
1698         return 0;
1699 }
1700
1701 /**
1702  * Cancel all locks on a namespace (or a specific resource, if given)
1703  * that have 0 readers/writers.
1704  *
1705  * If flags & LCF_LOCAL, throw the locks away without trying
1706  * to notify the server.
1707  */
1708 int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
1709                            const struct ldlm_res_id *res_id,
1710                            enum ldlm_cancel_flags flags, void *opaque)
1711 {
1712         struct ldlm_cli_cancel_arg arg = {
1713                 .lc_flags       = flags,
1714                 .lc_opaque      = opaque,
1715         };
1716
1717         if (!ns)
1718                 return ELDLM_OK;
1719
1720         if (res_id) {
1721                 return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
1722                                                        LCK_MINMODE, flags,
1723                                                        opaque);
1724         } else {
1725                 cfs_hash_for_each_nolock(ns->ns_rs_hash,
1726                                          ldlm_cli_hash_cancel_unused, &arg);
1727                 return ELDLM_OK;
1728         }
1729 }
1730 EXPORT_SYMBOL(ldlm_cli_cancel_unused);
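
/*
 * Illustrative sketch (not part of the original source): drop every idle
 * lock in a namespace without notifying the server, e.g. during cleanup.
 */
#if 0
static int example_flush_namespace(struct ldlm_namespace *ns)
{
        /* A NULL res_id walks every resource in the namespace hash;
         * LCF_LOCAL throws the locks away without server notification.
         */
        return ldlm_cli_cancel_unused(ns, NULL, LCF_LOCAL, NULL);
}
#endif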
1731
1732 /* Lock iterators. */
1733
1734 static int ldlm_resource_foreach(struct ldlm_resource *res,
1735                                  ldlm_iterator_t iter, void *closure)
1736 {
1737         struct list_head *tmp, *next;
1738         struct ldlm_lock *lock;
1739         int rc = LDLM_ITER_CONTINUE;
1740
1741         if (!res)
1742                 return LDLM_ITER_CONTINUE;
1743
1744         lock_res(res);
1745         list_for_each_safe(tmp, next, &res->lr_granted) {
1746                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1747
1748                 if (iter(lock, closure) == LDLM_ITER_STOP) {
1749                         rc = LDLM_ITER_STOP;
1750                         goto out;
1751                 }
1752         }
1753
1754         list_for_each_safe(tmp, next, &res->lr_waiting) {
1755                 lock = list_entry(tmp, struct ldlm_lock, l_res_link);
1756
1757                 if (iter(lock, closure) == LDLM_ITER_STOP) {
1758                         rc = LDLM_ITER_STOP;
1759                         goto out;
1760                 }
1761         }
1762  out:
1763         unlock_res(res);
1764         return rc;
1765 }
1766
1767 struct iter_helper_data {
1768         ldlm_iterator_t iter;
1769         void *closure;
1770 };
1771
1772 static int ldlm_iter_helper(struct ldlm_lock *lock, void *closure)
1773 {
1774         struct iter_helper_data *helper = closure;
1775
1776         return helper->iter(lock, helper->closure);
1777 }
1778
1779 static int ldlm_res_iter_helper(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1780                                 struct hlist_node *hnode, void *arg)
1781
1782 {
1783         struct ldlm_resource *res = cfs_hash_object(hs, hnode);
1784
1785         return ldlm_resource_foreach(res, ldlm_iter_helper, arg) ==
1786                LDLM_ITER_STOP;
1787 }
1788
1789 static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
1790                                    ldlm_iterator_t iter, void *closure)
1791
1792 {
1793         struct iter_helper_data helper = {
1794                 .iter           = iter,
1795                 .closure        = closure,
1796         };
1797
1798         cfs_hash_for_each_nolock(ns->ns_rs_hash,
1799                                  ldlm_res_iter_helper, &helper);
1800 }
1801
1802 /* Non-blocking function to manipulate a lock whose cb_data is being put away.
1803  * Return  0:  no resource found
1804  *       > 0:  LDLM_ITER_STOP or LDLM_ITER_CONTINUE
1805  *       < 0:  error
1806  */
1807 int ldlm_resource_iterate(struct ldlm_namespace *ns,
1808                           const struct ldlm_res_id *res_id,
1809                           ldlm_iterator_t iter, void *data)
1810 {
1811         struct ldlm_resource *res;
1812         int rc;
1813
1814         LASSERTF(ns, "must pass in namespace\n");
1815
1816         res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
1817         if (IS_ERR(res))
1818                 return 0;
1819
1820         LDLM_RESOURCE_ADDREF(res);
1821         rc = ldlm_resource_foreach(res, iter, data);
1822         LDLM_RESOURCE_DELREF(res);
1823         ldlm_resource_putref(res);
1824         return rc;
1825 }
1826 EXPORT_SYMBOL(ldlm_resource_iterate);
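
/*
 * Illustrative sketch (not part of the original source): an ldlm_iterator_t
 * callback that stops at the first lock whose l_ast_data matches a given
 * cookie, for use with ldlm_resource_iterate(). The callback name is
 * hypothetical.
 */
#if 0
static int example_match_ast_data(struct ldlm_lock *lock, void *closure)
{
        /* Returning LDLM_ITER_STOP ends the walk and is propagated as the
         * (> 0) return value of ldlm_resource_iterate().
         */
        return lock->l_ast_data == closure ?
               LDLM_ITER_STOP : LDLM_ITER_CONTINUE;
}
#endif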
1827
1828 /* Lock replay */
1829
1830 static int ldlm_chain_lock_for_replay(struct ldlm_lock *lock, void *closure)
1831 {
1832         struct list_head *list = closure;
1833
1834         /* We use l_pending_chain here because it's unused on clients. */
1835         LASSERTF(list_empty(&lock->l_pending_chain),
1836                  "lock %p next %p prev %p\n",
1837                  lock, &lock->l_pending_chain.next,
1838                  &lock->l_pending_chain.prev);
1839         /* Don't replay locks left after eviction (bug 9573) or locks
1840          * being actively cancelled (bug 17614). Get a reference on the
1841          * lock so that it does not disappear under us (e.g. due to cancel).
1842          */
1843         if (!(lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCELING))) {
1844                 list_add(&lock->l_pending_chain, list);
1845                 LDLM_LOCK_GET(lock);
1846         }
1847
1848         return LDLM_ITER_CONTINUE;
1849 }
1850
1851 static int replay_lock_interpret(const struct lu_env *env,
1852                                  struct ptlrpc_request *req,
1853                                  struct ldlm_async_args *aa, int rc)
1854 {
1855         struct ldlm_lock     *lock;
1856         struct ldlm_reply    *reply;
1857         struct obd_export    *exp;
1858
1859         atomic_dec(&req->rq_import->imp_replay_inflight);
1860         if (rc != ELDLM_OK)
1861                 goto out;
1862
1863         reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
1864         if (!reply) {
1865                 rc = -EPROTO;
1866                 goto out;
1867         }
1868
1869         lock = ldlm_handle2lock(&aa->lock_handle);
1870         if (!lock) {
1871                 CERROR("received replay ack for unknown local cookie %#llx remote cookie %#llx from server %s id %s\n",
1872                        aa->lock_handle.cookie, reply->lock_handle.cookie,
1873                        req->rq_export->exp_client_uuid.uuid,
1874                        libcfs_id2str(req->rq_peer));
1875                 rc = -ESTALE;
1876                 goto out;
1877         }
1878
1879         /* Key change: rehash the lock in the per-export hash with the new key */
1880         exp = req->rq_export;
1881         if (exp && exp->exp_lock_hash) {
1882                 /* In the function below, .hs_keycmp resolves to
1883                  * ldlm_export_lock_keycmp()
1884                  */
1885                 /* coverity[overrun-buffer-val] */
1886                 cfs_hash_rehash_key(exp->exp_lock_hash,
1887                                     &lock->l_remote_handle,
1888                                     &reply->lock_handle,
1889                                     &lock->l_exp_hash);
1890         } else {
1891                 lock->l_remote_handle = reply->lock_handle;
1892         }
1893
1894         LDLM_DEBUG(lock, "replayed lock:");
1895         ptlrpc_import_recovery_state_machine(req->rq_import);
1896         LDLM_LOCK_PUT(lock);
1897 out:
1898         if (rc != ELDLM_OK)
1899                 ptlrpc_connect_import(req->rq_import);
1900
1901         return rc;
1902 }
1903
1904 static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
1905 {
1906         struct ptlrpc_request *req;
1907         struct ldlm_async_args *aa;
1908         struct ldlm_request   *body;
1909         int flags;
1910
1911         /* Bug 11974: Do not replay a lock which is actively being canceled */
1912         if (ldlm_is_canceling(lock)) {
1913                 LDLM_DEBUG(lock, "Not replaying canceled lock:");
1914                 return 0;
1915         }
1916
1917         /* If this is a reply-less callback lock, we cannot replay it: the
1918          * server might have long since dropped it, the notification lost in
1919          * the network, and already granted a conflicting lock.
1920          */
1921         if (ldlm_is_cancel_on_block(lock)) {
1922                 LDLM_DEBUG(lock, "Not replaying reply-less lock:");
1923                 ldlm_lock_cancel(lock);
1924                 return 0;
1925         }
1926
1927         /*
1928          * If granted mode matches the requested mode, this lock is granted.
1929          *
1930          * If they differ, but we have a granted mode, then we were granted
1931          * one mode and now want another: ergo, converting.
1932          *
1933          * If we haven't been granted anything and are on a resource list,
1934          * then we're blocked/waiting.
1935          *
1936          * If we haven't been granted anything and we're NOT on a resource list,
1937          * then we haven't got a reply yet and don't have a known disposition.
1938          * This happens whenever a lock enqueue is the request that triggers
1939          * recovery.
1940          */
1941         if (lock->l_granted_mode == lock->l_req_mode)
1942                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_GRANTED;
1943         else if (lock->l_granted_mode)
1944                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_CONV;
1945         else if (!list_empty(&lock->l_res_link))
1946                 flags = LDLM_FL_REPLAY | LDLM_FL_BLOCK_WAIT;
1947         else
1948                 flags = LDLM_FL_REPLAY;
1949
1950         req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
1951                                         LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
1952         if (!req)
1953                 return -ENOMEM;
1954
1955         /* We're part of recovery, so don't wait for it. */
1956         req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
1957
1958         body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
1959         ldlm_lock2desc(lock, &body->lock_desc);
1960         body->lock_flags = ldlm_flags_to_wire(flags);
1961
1962         ldlm_lock2handle(lock, &body->lock_handle[0]);
1963         if (lock->l_lvb_len > 0)
1964                 req_capsule_extend(&req->rq_pill, &RQF_LDLM_ENQUEUE_LVB);
1965         req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
1966                              lock->l_lvb_len);
1967         ptlrpc_request_set_replen(req);
1968         /* Notify the server that we've replayed all requests.
1969          * Also, mark the request to be put on a dedicated
1970          * queue to be processed after all request replays
1971          * (bug 6063).
1972          */
1973         lustre_msg_set_flags(req->rq_reqmsg, MSG_REQ_REPLAY_DONE);
1974
1975         LDLM_DEBUG(lock, "replaying lock:");
1976
1977         atomic_inc(&req->rq_import->imp_replay_inflight);
1978         CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1979         aa = ptlrpc_req_async_args(req);
1980         aa->lock_handle = body->lock_handle[0];
1981         req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
1982         ptlrpcd_add_req(req);
1983
1984         return 0;
1985 }
1986
1987 /**
1988  * Cancel as many unused locks as possible before replay. Since we are
1989  * in recovery, we cannot wait for any outstanding RPCs, nor send any
1990  * RPC to the server.
1991  *
1992  * Called only in recovery before replaying locks. There is no need to
1993  * replay locks that are unused. Since the clients may hold thousands of
1994  * cached unused locks, dropping the unused locks can greatly reduce the
1995  * load on the servers at recovery time.
1996  */
1997 static void ldlm_cancel_unused_locks_for_replay(struct ldlm_namespace *ns)
1998 {
1999         int canceled;
2000         LIST_HEAD(cancels);
2001
2002         CDEBUG(D_DLMTRACE, "Dropping as many unused locks as possible before replay for namespace %s (%d)\n",
2003                ldlm_ns_name(ns), ns->ns_nr_unused);
2004
2005         /* We don't need to care whether or not LRU resize is enabled
2006          * because the LDLM_CANCEL_NO_WAIT policy doesn't use the
2007          * count parameter.
2008          */
2009         canceled = ldlm_cancel_lru_local(ns, &cancels, ns->ns_nr_unused, 0,
2010                                          LCF_LOCAL, LDLM_CANCEL_NO_WAIT);
2011
2012         CDEBUG(D_DLMTRACE, "Canceled %d unused locks from namespace %s\n",
2013                canceled, ldlm_ns_name(ns));
2014 }
2015
2016 int ldlm_replay_locks(struct obd_import *imp)
2017 {
2018         struct ldlm_namespace *ns = imp->imp_obd->obd_namespace;
2019         LIST_HEAD(list);
2020         struct ldlm_lock *lock, *next;
2021         int rc = 0;
2022
2023         LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
2024
2025         /* don't replay locks if import failed recovery */
2026         if (imp->imp_vbr_failed)
2027                 return 0;
2028
2029         /* ensure this doesn't fall to 0 before all have been queued */
2030         atomic_inc(&imp->imp_replay_inflight);
2031
2032         if (ldlm_cancel_unused_locks_before_replay)
2033                 ldlm_cancel_unused_locks_for_replay(ns);
2034
2035         ldlm_namespace_foreach(ns, ldlm_chain_lock_for_replay, &list);
2036
2037         list_for_each_entry_safe(lock, next, &list, l_pending_chain) {
2038                 list_del_init(&lock->l_pending_chain);
2039                 if (rc) {
2040                         LDLM_LOCK_RELEASE(lock);
2041                         continue; /* or try to do the rest? */
2042                 }
2043                 rc = replay_one_lock(imp, lock);
2044                 LDLM_LOCK_RELEASE(lock);
2045         }
2046
2047         atomic_dec(&imp->imp_replay_inflight);
2048
2049         return rc;
2050 }
2051 EXPORT_SYMBOL(ldlm_replay_locks);
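
/*
 * Illustrative sketch (not part of the original source): the replay entry
 * point is driven by the ptlrpc import recovery state machine; assuming
 * the import has reached the LUSTRE_IMP_REPLAY_LOCKS state, lock replay
 * would be kicked off roughly like this.
 */
#if 0
        if (imp->imp_state == LUSTRE_IMP_REPLAY_LOCKS)
                rc = ldlm_replay_locks(imp);
#endif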